jackkuo committed · verified
Commit 844f619 · 1 Parent(s): dd03284

Add files using upload-large-folder tool

Files changed (50). This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full list.
  1. .gitattributes +6 -0
  2. 0dFQT4oBgHgl3EQfDjUj/content/tmp_files/2301.13234v1.pdf.txt +0 -0
  3. 0dFQT4oBgHgl3EQfDjUj/content/tmp_files/load_file.txt +0 -0
  4. 2tFKT4oBgHgl3EQf7y43/vector_store/index.faiss +3 -0
  5. 39E0T4oBgHgl3EQfvAGt/content/tmp_files/2301.02613v1.pdf.txt +1843 -0
  6. 39E0T4oBgHgl3EQfvAGt/content/tmp_files/load_file.txt +0 -0
  7. 4NE0T4oBgHgl3EQfeQCD/content/tmp_files/2301.02388v1.pdf.txt +717 -0
  8. 4NE0T4oBgHgl3EQfeQCD/content/tmp_files/load_file.txt +347 -0
  9. 4tE0T4oBgHgl3EQfegAX/content/tmp_files/2301.02390v1.pdf.txt +491 -0
  10. 4tE0T4oBgHgl3EQfegAX/content/tmp_files/load_file.txt +281 -0
  11. 6tAzT4oBgHgl3EQfEvoZ/content/tmp_files/2301.00997v1.pdf.txt +848 -0
  12. 9NE1T4oBgHgl3EQf7wX0/content/tmp_files/2301.03539v1.pdf.txt +1782 -0
  13. 9NE1T4oBgHgl3EQf7wX0/content/tmp_files/load_file.txt +0 -0
  14. 9NE2T4oBgHgl3EQflwcz/content/tmp_files/2301.03991v1.pdf.txt +1075 -0
  15. 9NE2T4oBgHgl3EQflwcz/content/tmp_files/load_file.txt +0 -0
  16. AdE1T4oBgHgl3EQf9Aai/content/tmp_files/2301.03552v1.pdf.txt +1395 -0
  17. AdE1T4oBgHgl3EQf9Aai/content/tmp_files/load_file.txt +0 -0
  18. BtFKT4oBgHgl3EQfXS78/content/2301.11794v1.pdf +3 -0
  19. F9E5T4oBgHgl3EQfVg_E/content/tmp_files/2301.05552v1.pdf.txt +1587 -0
  20. F9E5T4oBgHgl3EQfVg_E/content/tmp_files/load_file.txt +0 -0
  21. FNE0T4oBgHgl3EQfQwDu/content/tmp_files/2301.02199v1.pdf.txt +684 -0
  22. FNE0T4oBgHgl3EQfQwDu/content/tmp_files/load_file.txt +0 -0
  23. HdFJT4oBgHgl3EQfFCyj/content/tmp_files/2301.11440v1.pdf.txt +809 -0
  24. HdFJT4oBgHgl3EQfFCyj/content/tmp_files/load_file.txt +0 -0
  25. MtFJT4oBgHgl3EQfzS0r/content/2301.11642v1.pdf +3 -0
  26. NtFQT4oBgHgl3EQfWjbh/content/tmp_files/2301.13305v1.pdf.txt +371 -0
  27. NtFQT4oBgHgl3EQfWjbh/content/tmp_files/load_file.txt +301 -0
  28. OdAyT4oBgHgl3EQf7PpY/content/tmp_files/2301.00835v1.pdf.txt +3589 -0
  29. OdAyT4oBgHgl3EQf7PpY/content/tmp_files/load_file.txt +0 -0
  30. OtAzT4oBgHgl3EQfIftM/content/tmp_files/2301.01062v1.pdf.txt +2037 -0
  31. OtAzT4oBgHgl3EQfIftM/content/tmp_files/load_file.txt +0 -0
  32. PNFJT4oBgHgl3EQfIiwq/content/tmp_files/2301.11456v1.pdf.txt +0 -0
  33. PNFJT4oBgHgl3EQfIiwq/content/tmp_files/load_file.txt +0 -0
  34. QNFPT4oBgHgl3EQfojUh/content/tmp_files/2301.13134v1.pdf.txt +0 -0
  35. QNFPT4oBgHgl3EQfojUh/content/tmp_files/load_file.txt +0 -0
  36. TdE4T4oBgHgl3EQfLwwK/content/tmp_files/2301.04940v1.pdf.txt +262 -0
  37. TdE4T4oBgHgl3EQfLwwK/content/tmp_files/load_file.txt +267 -0
  38. XdE0T4oBgHgl3EQfmgEE/content/tmp_files/2301.02498v1.pdf.txt +3030 -0
  39. XdE0T4oBgHgl3EQfmgEE/content/tmp_files/load_file.txt +0 -0
  40. c9E3T4oBgHgl3EQfeQoX/content/2301.04541v1.pdf +3 -0
  41. dNE2T4oBgHgl3EQfGAZK/content/2301.03652v1.pdf +3 -0
  42. dNFRT4oBgHgl3EQfTjeD/content/tmp_files/2301.13533v1.pdf.txt +2769 -0
  43. dNFRT4oBgHgl3EQfTjeD/content/tmp_files/load_file.txt +0 -0
  44. dtE3T4oBgHgl3EQfegpr/content/tmp_files/2301.04544v1.pdf.txt +1004 -0
  45. dtE3T4oBgHgl3EQfegpr/content/tmp_files/load_file.txt +0 -0
  46. gNE3T4oBgHgl3EQffgrq/content/tmp_files/2301.04554v1.pdf.txt +2572 -0
  47. gNE3T4oBgHgl3EQffgrq/content/tmp_files/load_file.txt +0 -0
  48. hNAzT4oBgHgl3EQf4f5B/content/tmp_files/2301.01844v1.pdf.txt +2136 -0
  49. hNAzT4oBgHgl3EQf4f5B/content/tmp_files/load_file.txt +0 -0
  50. ktE5T4oBgHgl3EQfGw6j/content/tmp_files/2301.05434v1.pdf.txt +1103 -0
.gitattributes CHANGED
@@ -185,3 +185,9 @@ V9AzT4oBgHgl3EQfJ_vP/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
 x9FJT4oBgHgl3EQfhSy6/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
 XNAyT4oBgHgl3EQf9PpX/content/2301.00870v1.pdf filter=lfs diff=lfs merge=lfs -text
 o9FMT4oBgHgl3EQf7jFB/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+mtE1T4oBgHgl3EQfhQRC/content/2301.03238v1.pdf filter=lfs diff=lfs merge=lfs -text
+c9E3T4oBgHgl3EQfeQoX/content/2301.04541v1.pdf filter=lfs diff=lfs merge=lfs -text
+dNE2T4oBgHgl3EQfGAZK/content/2301.03652v1.pdf filter=lfs diff=lfs merge=lfs -text
+MtFJT4oBgHgl3EQfzS0r/content/2301.11642v1.pdf filter=lfs diff=lfs merge=lfs -text
+BtFKT4oBgHgl3EQfXS78/content/2301.11794v1.pdf filter=lfs diff=lfs merge=lfs -text
+2tFKT4oBgHgl3EQf7y43/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
0dFQT4oBgHgl3EQfDjUj/content/tmp_files/2301.13234v1.pdf.txt ADDED
The diff for this file is too large to render. See raw diff
 
0dFQT4oBgHgl3EQfDjUj/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
2tFKT4oBgHgl3EQf7y43/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0cea52c9bf2245480142f3747ad82d1520896e5c01b54cc7e6e13d038f7b2bf0
+size 2621485
39E0T4oBgHgl3EQfvAGt/content/tmp_files/2301.02613v1.pdf.txt ADDED
@@ -0,0 +1,1843 @@
Learning Deep MRI Reconstruction Models from Scratch in Low-Data Regimes

Salman Ul Hassan Dar, Şaban Öztürk, Muzaffer Özbey, and Tolga Çukur*

Abstract— Magnetic resonance imaging (MRI) is an essential diagnostic tool that suffers from prolonged scan times. Reconstruction methods can alleviate this limitation by recovering clinically usable images from accelerated acquisitions. In particular, learning-based methods promise performance leaps by employing deep neural networks as data-driven priors. A powerful approach uses scan-specific (SS) priors that leverage information regarding the underlying physical signal model for reconstruction. SS priors are learned on each individual test scan without the need for a training dataset, albeit they suffer from computationally burdensome inference with nonlinear networks. An alternative approach uses scan-general (SG) priors that instead leverage information regarding the latent features of MRI images for reconstruction. SG priors are frozen at test time for efficiency, albeit they require learning from a large training dataset. Here, we introduce a novel parallel-stream fusion model (PSFNet) that synergistically fuses SS and SG priors for performant MRI reconstruction in low-data regimes, while maintaining inference times competitive with SG methods. PSFNet implements its SG prior based on a nonlinear network, yet it forms its SS prior based on a linear network to maintain efficiency. A pervasive framework for combining multiple priors in MRI reconstruction is algorithmic unrolling that uses serially alternated projections, causing error propagation under low-data regimes. To alleviate error propagation, PSFNet combines its SS and SG priors via a novel parallel-stream architecture with learnable fusion parameters. Demonstrations are performed on multi-coil brain MRI for varying amounts of training data. PSFNet outperforms SG methods in low-data regimes, and surpasses SS methods with a few tens of training samples. In both supervised and unsupervised setups, PSFNet requires an order of magnitude fewer samples than SG methods, and enables an order of magnitude faster inference than SS methods. Thus, the proposed model improves deep MRI reconstruction with elevated learning and computational efficiency.

Index Terms— image reconstruction, deep learning, scan specific, scan general, low data, supervised, unsupervised.

This work was supported in part by a TUBA GEBIP 2015 fellowship, by a BAGEP 2017 fellowship, and by a TUBITAK 121E488 grant awarded to T. Çukur.

S. U. H. Dar, Ş. Öztürk, M. Özbey, and T. Çukur are with the Department of Electrical and Electronics Engineering, and the National Magnetic Resonance Research Center, Bilkent University, Ankara, Turkey (e-mails: {salman,muzaffer,cukur}@ee.bilkent.edu.tr, saban.ozturk@amasya.edu.tr). Ş. Öztürk is also with Amasya University, Amasya, Turkey.
I. INTRODUCTION

The unparalleled soft-tissue contrast and non-invasiveness of MRI render it a preferred modality in many diagnostic applications [1], [2], and downstream imaging tasks such as classification [3] and segmentation [4], [5]. However, the adverse effects of low spin polarization at mainstream field strengths on the signal-to-noise ratio make it slower against alternate modalities such as CT [6]. Since long scan durations inevitably constrain clinical utility, there is an ever-growing interest in accelerated MRI methods to improve scan efficiency. Accelerated MRI involves an ill-posed inverse problem with the aim of mapping undersampled acquisitions in k-space to high-quality images corresponding to fully-sampled acquisitions. Conventional frameworks for solving this problem rely on parallel imaging (PI) capabilities of receive coil arrays [7], [8], in conjunction with hand-constructed MRI priors [9], [10]. A joint objective is iteratively optimized comprising a data-consistency (DC) term based on the physical signal model, and a regularization term that enforces the MRI prior [9]. The physical model constrains reconstructed data to be consistent with acquired data while considering coil sensitivities and undersampling patterns [11]. Meanwhile, the regularization term, often based on a linear transform where data are assumed to be compressible [9], introduces suboptimality when the distribution of MRI data diverges from the hand-constructed prior.

Deep learning (DL) methods have recently been adopted as a promising framework to improve reconstruction performance [12]–[16]. Inspired by traditional methods, a powerful approach is based on scan-specific (SS) priors that leverage the physical signal model to learn a reconstruction specific to each test scan, i.e. undersampled k-space data from a given test subject. Similar to autocalibration procedures in PI, a first group of SS methods perform training using a fully-sampled calibration region and then exercise learned dependencies in broader k-space [15]–[18]. Following the deep image prior technique, a second group of methods use unconditional CNNs as a native MRI prior [19]–[21]. These CNNs map low-dimensional latent variables onto MR images, and latents and network weights are optimized to ensure consistency to acquired data based on the physical signal model. In general, SS priors learned on each subject at test time avoid the need for separate training datasets, and promise improved reliability against atypical anatomy. However, they suffer from long inference times that can be prohibitive particularly when nonlinear networks are adopted [22]–[24].

A fundamental alternative is to employ scan-general (SG) priors based on deep nonlinear networks that capture latent features of MR images [12]–[14], [25]–[33]. Numerous successful architectures have been reported including perceptrons [34], basic convolutional neural networks (CNNs) [35]–[38], residual or recurrent CNNs [29], [39]–[41], generative adversarial networks (GANs) [42]–[46], transformers [47], [48] and diffusion models [49], [50]. Physics-guided unrolled methods have received particular attention; these combine the physical signal model as in traditional frameworks with regularization via a deep network serving as an SG prior [13], [27], [51]–[53]. Reconstruction is achieved via serially alternated projections through the physical signal model and the SG prior [38], [40], [54]–[56]. However, under low-data regimes, the suboptimally trained SG prior introduces errors that are propagated across the unrolled architecture, compromising performance [6], [57], [58]. Furthermore, learning of SG priors requires large training datasets from several tens to hundreds of subjects [28], [59], [60], which can limit practicality.

Here, we propose a novel parallel-stream fusion model (PSFNet) that consolidates SS and SG priors to enable data-efficient training and computation-efficient inference in deep MRI reconstruction¹. PSFNet leverages an SS stream to perform linear reconstruction based on the physical signal model, and an SG stream to perform nonlinear reconstruction based on a deep network. Unlike conventional unrolled methods based on serial projections, here we propose a parallel-stream architecture with learnable fusion of SS and SG priors. Fusion parameters are adapted across cascades and training iterations to emphasize task-critical information. Comprehensive experiments on brain MRI datasets are reported to demonstrate PSFNet under both supervised and unsupervised settings [62]–[66]. PSFNet is compared against an unrolled SG method [27], two SS methods [17], [67], and conventional SPIRiT reconstructions [11]. Compared to the unrolled model, PSFNet lowers training data requirements by an order of magnitude. Compared to SS models, PSFNet offers significantly faster inference times. Our main contributions are summarized below:

• A novel cascaded network architecture is introduced that adaptively fuses SS and SG priors across cascades and training iterations to improve learning-based MRI reconstruction in low-data regimes.
• The SS prior facilitates learning of the SG prior with limited data, and empowers PSFNet to successfully generalize to out-of-domain samples.
• The SG prior improves performance by capturing nonlinear residuals, and enhances resilience against suboptimal hyperparameter selection in the SS component.
• Parallel-stream fusion of SS and SG priors yields robust performance with limited training data in both supervised and unsupervised settings.

¹ See [61] for a preliminary version of this work presented at ISMRM 2021.

II. THEORY
A. Image Reconstruction in Accelerated MRI

MRI reconstruction is an inverse problem that aims to recover an image from a respective undersampled acquisition:

$M F x = y$   (1)

where $F$ is the Fourier transform, $M$ is the sampling mask defining acquired k-space locations, $x$ is the multi-coil image to be reconstructed and $y$ are acquired multi-coil k-space data. To improve problem conditioning, additional prior information regarding the expected distribution of MR images is incorporated in the form of a regularization term:

$\hat{x} = \arg\min_{x} \; \lambda \| M F x - y \|_2^2 + R(x)$   (2)

where the first term enforces DC between reconstructed and acquired k-space data, $R(x)$ reflects the MRI prior, and $\lambda$ controls the balance between the DC and regularization terms. The DC term can be implemented by injecting the acquired values of k-space data into the reconstruction [13]. Thus, mapping through a DC block is given as:

$f_{DC}(x) = F^{-1} \Lambda F x + \frac{\lambda}{1+\lambda} F^{-1} y$   (3)

where $\Lambda$ is a diagonal matrix with diagonal entries set to $\frac{1}{1+\lambda}$ at acquired k-space locations and set to 1 in unacquired locations.
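To make the DC mapping concrete, Eq. 3 can be sketched as a short NumPy routine. This is a minimal single-coil sketch under the notation above; the function and array names, and the boolean-mask convention, are illustrative assumptions rather than the implementation used in the paper.

```python
import numpy as np

def dc_block(x, y, mask, lam=1e3):
    """Data-consistency mapping of Eq. (3), sketched for a single coil.

    x    : current image estimate (2D complex array)
    y    : acquired k-space data, zero-filled at unacquired locations
    mask : boolean array, True at acquired k-space locations
    lam  : DC weight (lambda); a very large value approaches strict consistency
    """
    k = np.fft.fft2(x)                              # F x
    Lam = np.where(mask, 1.0 / (1.0 + lam), 1.0)    # diagonal entries of Lambda
    k_dc = Lam * k + (lam / (1.0 + lam)) * y        # blend with acquired samples
    return np.fft.ifft2(k_dc)                       # F^{-1} back to image domain
```

With lam chosen very large, acquired k-space samples are restored essentially verbatim, matching the strict-consistency setting (λ = ∞) used later in the Methods.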
In traditional methods, the regularization term is based on a hand-constructed transform domain where data are assumed to have a sparse representation [9]. For improved conformation to the distribution of MRI data, recent frameworks instead adopt deep network models to capture either SG priors learned from a large MRI database with hundreds of subjects, or SS priors learned from individual test scans. Learning procedures for the two types of priors are discussed below.

SG priors: In MRI, SG priors are typically adopted to suppress aliasing artifacts in the zero-filled reconstruction (i.e., inverse Fourier transform) of undersampled k-space acquisitions [27]. A deep network model that performs de-aliasing can be learned from a large training dataset of undersampled and corresponding fully-sampled k-space acquisitions, and then employed to implement $R(\cdot)$ in Eq. 2 during inference. The regularization term based on an SG prior is given as:

$R_{SG}(x) = \arg\min_{x} \| C_{SG}(F^{-1} y; \hat{\theta}_{SG}) - x \|_2^2$   (4)

where $C_{SG}$ is an image-domain deep network with learned parameters $\hat{\theta}_{SG}$. The formulation in Eq. 4 assumes that $C_{SG}$ recovers multi-coil output images provided multi-coil input images. The parameters $\theta_{SG}$ for $C_{SG}$ can be learned based on a pixel-wise loss between reconstructed and ground-truth images. Training is conducted offline via an empirical risk minimization approach based on Monte Carlo sampling [13]:

$\mathcal{L}_{SG}(\theta_{SG}) = \sum_{n=1}^{N} \| C_{SG}(F^{-1} y_n; \theta_{SG}) - \breve{x}_n \|_p$   (5)

where $N$ is the number of training scans, $n$ is the training scan index, $\|\cdot\|_p$ denotes the $\ell_p$ norm, $\breve{x}_n$ is the ground-truth multi-coil image derived from the fully-sampled acquisition for the $n$th scan, and $y_n$ are the respective undersampled k-space data.
A common approach to build $C_{SG}$ is based on unrolled architectures that perform cascaded projections through CNN blocks to regularize the image and DC blocks to ensure conformance to the physical signal model [27]. Given a total of $K$ cascades with tied CNN parameters across cascades, the mapping through the $k$th cascade is [13], [68], [69]:

$x_k^r = f_{DC}\big(f_{SG}(x_{k-1}^r; \theta_{SG})\big)$   (6)

where $x_k^r$ is the image for the $r$th scan (that could be a training or test scan) at the output of the $k$th cascade ($k \in [1, 2, ..., K]$), and $x_0^r = F^{-1} y^r$ where $y^r$ are the acquired undersampled data for the $r$th scan. Meanwhile, $f_{SG}$ is the CNN block embedded in the $k$th cascade with parameters $\theta_{SG}$.

As the parameters of SG priors are trained offline and then frozen during inference, deeper network architectures can be used for enhanced reconstruction performance along with fast inference. However, learning deep networks requires substantial training datasets that may be difficult to collect. Moreover, since SG priors learn aggregate representations of MRI data across training subjects, they may show poor generalization to subject-specific variability in anatomy [19].
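A minimal sketch of the unrolled mapping in Eq. 6, assuming a generic de-aliasing network passed in as a callable and reusing the dc_block routine sketched above; it only illustrates the serial cascade structure, not the MoDL implementation.

```python
import numpy as np

def unrolled_sg_recon(y, mask, f_sg, n_cascades=5, lam=1e3):
    """Serial SG/DC cascade of Eq. (6): x_k = f_DC(f_SG(x_{k-1}))."""
    x = np.fft.ifft2(y)                  # x_0: zero-filled reconstruction
    for _ in range(n_cascades):
        x = f_sg(x)                      # projection through the SG prior
        x = dc_block(x, y, mask, lam)    # projection through the physical model
    return x
```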
SS priors: Unlike SG priors, SS priors are not learned from a dedicated training dataset but instead they are learned directly for individual test scans to improve generalization [15]. The SS prior can also be used to implement $R(\cdot)$ in Eq. 2, with the respective regularization term expressed as:

$R_{SS}(x) = \arg\min_{x} \| C_{SS}(F^{-1} y; \hat{\theta}_{SS}) - x \|_2^2$   (7)

where $C_{SS}$ is an image-domain network with parameters $\hat{\theta}_{SS}$. In the absence of ground-truth images, the parameters $\theta_{SS}^q$ for the $q$th test scan can be learned based on proxy k-space losses between reconstructed and acquired undersampled data [22]. Learning is conducted online to minimize this proxy loss:

$\mathcal{L}_{SS}(\theta_{SS}^q) = \| M F C_{SS}(F^{-1} y^q; \theta_{SS}^q) - y^q \|_p$   (8)

where $y^q$ are acquired undersampled k-space data for the $q$th scan. An unrolled architecture can be adopted to build $C_{SS}$ by performing cascaded projections through network and DC blocks, resulting in the following mapping for the $k$th cascade:

$x_k^q = f_{DC}\big(f_{SS}(x_{k-1}^q; \theta_{SS}^q)\big)$   (9)

$f_{SS}$ can be operationalized as a linear or nonlinear network [22], [23]. As the parameters of SS priors are learned independently for each test scan, they promise enhanced generalization to subject-specific anatomy. However, since training is performed online during inference, SS priors can introduce substantial computational burden, particularly when deep nonlinear networks are used that also increase the risk of overfitting [70].
B. PSFNet

Here, we propose to combine SS and SG priors to maintain a favorable trade-off between generalization performance and computational efficiency under low-data regimes. In the conventional unrolling framework, this requires computation of serially alternated projections through the SS, SG and DC blocks:

$x_k^r = f_{DC}\big(f_{SG}(f_{SS}(x_{k-1}^r; \theta_{SS}^r); \theta_{SG})\big)$   (10)

The unrolled architecture with $K$ cascades can be learned offline using the training set. Note that scarcely-trained SG blocks under low-data regimes can perform suboptimally, introducing residual errors in their output. In turn, these errors will accumulate across serial projections to degrade the overall performance.

To address this limitation, here we introduce a novel architecture, PSFNet, that performs parallel-stream fusion of SS and SG priors as opposed to the serial combination in conventional unrolled methods. PSFNet utilizes a nonlinear SG prior for high performance, and a linear SS prior to enhance generalization without excessive computational burden. The two priors undergo parallel-stream fusion with learnable fusion parameters $\eta$ and $\gamma$, as displayed in Figure 1. These parameters adaptively control the relative weighting of information extracted by the SG versus SS streams during the course of training in order to alleviate error accumulation. As such, the mapping through the $k$th cascade in PSFNet is:

$x_k^r = \eta_k f_{DC}\big(f_{SS}(x_{k-1}^r; \theta_{SS}^r)\big) + \gamma_k f_{DC}\big(f_{SG}(x_{k-1}^r; \theta_{SG})\big)$   (11)
In Eq. 11, the learnable fusion parameters for the SS and SG blocks at the $k$th cascade are $\eta_k$ and $\gamma_k$, respectively. To enforce fidelity to acquired data, DC projections are performed on the outputs of the SG and SS blocks. In PSFNet, the SG prior is learned collectively from the set of training scans and then frozen during inference on test scans. In contrast, the SS prior is learned individually for each scan, during both training and inference.

Training: PSFNet involves a training phase to learn model parameters for the SG prior as well as its fusion with the SS prior. For each individual scan in the training set, PSFNet learns a dedicated SS prior for the given scan. Since learning of a nonlinear SS prior has substantial computational burden, we adopt a linear SS prior in PSFNet. In particular, the SS block performs dealiasing via convolution with a linear kernel [71]:

$f_{SS}(x_{k-1}^n; \theta_{SS}^n) = F^{-1}\{\theta_{SS}^n \circledast F x_{k-1}^n\}$   (12)

where $\theta_{SS}^n \in \mathbb{C}^{z \times z \times w \times w}$ with $n$ denoting the training scan index, $z$ denoting the number of coil elements, and $w$ denoting the kernel size in k-space. The SS blocks contain unlearned Fourier and inverse Fourier transformation layers as their input and output layers, respectively, and convolution is computed over the spatial frequency dimensions in k-space. Meanwhile, the SG prior is implemented as a deep CNN operating in the image domain:

$f_{SG}(x_{k-1}^n; \theta_{SG}) = \mathrm{CNN}(x_{k-1}^n)$   (13)
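Combining Eqs. 11-13, one PSFNet cascade can be sketched as below. A plain k-space convolution stands in for the linear SS block and a generic callable for the SG CNN; the single-coil simplification, the function names, and the use of SciPy's convolve2d are illustrative assumptions, since the actual SS kernel couples all coil elements.

```python
import numpy as np
from scipy.signal import convolve2d

def ss_block(x, kernel):
    """Linear SS block of Eq. (12): convolution with a learned kernel in k-space."""
    k = np.fft.fft2(x)
    k_reg = convolve2d(k, kernel, mode="same", boundary="wrap")
    return np.fft.ifft2(k_reg)

def psfnet_cascade(x_prev, y, mask, ss_kernel, sg_cnn, eta_k, gamma_k, lam=1e3):
    """One parallel-stream cascade of Eq. (11), reusing dc_block from the sketch above."""
    x_ss = dc_block(ss_block(x_prev, ss_kernel), y, mask, lam)   # SS stream + DC
    x_sg = dc_block(sg_cnn(x_prev), y, mask, lam)                # SG stream + DC
    return eta_k * x_ss + gamma_k * x_sg                         # learnable fusion
```

In the full model, the fusion weights η_k and γ_k are trainable and distinct per cascade, and are optimized jointly with the SG CNN; the sketch only conveys the parallel-stream topology.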
Across the scans in the training set, the training loss for PSFNet can then be expressed in constrained form as:

$\mathcal{L}_{PSFNet}(\theta_{SG}, \boldsymbol{\gamma}, \boldsymbol{\eta}) = \sum_{n=1}^{N} \| \eta_K f_{DC}(f_{SS}(x_{K-1}^n; \hat{\theta}_{SS}^n)) + \gamma_K f_{DC}(f_{SG}(x_{K-1}^n; \theta_{SG})) - \breve{x}_n \|_p$
$\text{s.t. } \hat{\theta}_{SS}^n = \arg\min_{\theta_{SS}^n} \| F^{-1} W^n y^n - f_{SS}(F^{-1} W^n y^n; \theta_{SS}^n) \|_2^2$   (14)

The constraint in Eq. 14 corresponds to the scan-specific learning of the SS prior $\hat{\theta}_{SS}^n$, which is then adopted to calculate the loss. Assuming that the linear relationships among neighboring spatial frequencies are similarly distributed across k-space [71], $\hat{\theta}_{SS}^n$ is learned by solving a self-regression problem on the subset of fully-sampled data in central k-space, where $W^n$ is a mask operator that selects data within this calibration region.

Note that, unlike deep reconstruction models purely based on SG priors, the SG prior in PSFNet is not directly trained to remove artifacts in zero-filled reconstructions of undersampled data. Instead, the SG prior is trained to concurrently suppress artifacts in reconstructed images along with the SS prior, and the relative importance attributed to the two priors is determined by the fusion parameters at each cascade. As such, the SS prior can be given higher weight during initial training iterations where the SG prior is scarcely trained, whereas its weight can be relatively reduced during later iterations once the SG prior has been sufficiently trained. This adaptive fusion approach thereby lowers reliance on the availability of large training sets.

Fig. 1: (a) PSFNet comprises a parallel-stream cascade of sub-networks where each sub-network contains (b) a scan-general (SG) block, and (c) a scan-specific (SS) block. The two parallel blocks are each succeeded by (d) a data-consistency (DC) block, and their outputs are aggregated with learnable fusion weights, η_k and γ_k, where k is the cascade index. At the end of K cascades, coil combination is performed on multi-coil data using sensitivity maps estimated via ESPIRiT [71]. The SG block is implemented as a deep convolutional neural network (CNN) and the SS block is implemented as a linear projection layer.
Inference: During inference on the $q$th test scan, the respective SS prior is learned online as:

$\hat{\theta}_{SS}^q = \arg\min_{\theta_{SS}^q} \| F^{-1} W^q y^q - f_{SS}(F^{-1} W^q y^q; \theta_{SS}^q) \|_2^2$   (15)

Afterwards, the learned $\hat{\theta}_{SS}^q$ is used along with the previously trained $\hat{\theta}_{SG}$ to perform repeated projections through the $K$ cascades as described in Eq. 11. The multi-coil image recovered by PSFNet at the output of the $K$th cascade is:

$\hat{x}^q = \eta_K f_{DC}(f_{SS}(x_{K-1}^q; \hat{\theta}_{SS}^q)) + \gamma_K f_{DC}(f_{SG}(x_{K-1}^q; \hat{\theta}_{SG}))$   (16)

where $\hat{x}^q$ denotes the recovered image. The final reconstruction can be obtained by performing combination across coils:

$\hat{x}^q_{combined} = A^* \hat{x}^q$   (17)

where $A$ are coil sensitivities, and $A^*$ denotes the conjugate of $A$.
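The coil combination of Eq. 17 amounts to weighting each coil image by the conjugate of its sensitivity map and summing over coils. A minimal sketch, assuming sensitivities stacked along the first axis:

```python
import numpy as np

def coil_combine(x_multicoil, sens):
    """Eq. (17): combine coil images using conjugate sensitivity maps A*.

    x_multicoil : complex array of shape (n_coils, H, W)
    sens        : coil sensitivity maps of the same shape (e.g., from ESPIRiT)
    """
    return np.sum(np.conj(sens) * x_multicoil, axis=0)
```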
III. METHODS

A. Implementation Details

In each cascade, PSFNet contained two parallel streams with SG and SS blocks. The SG blocks comprised an input layer followed by a stack of 4 convolutional layers with 64 channels and 3x3 kernel size each, and an output layer, with ReLU activation functions. They processed complex images with separate channels for real and imaginary components. The SS blocks comprised a Fourier layer, 5 projection layers with identity activation functions, and an inverse Fourier layer. They processed complex images directly without splitting real and imaginary components. The linear convolution kernel used in the projection layers was learned from the calibration region by solving a Tikhonov regularized self-regression problem [11]. The DC blocks comprised 3 layers that respectively implement forward Fourier transformation, restoration of acquired k-space data, and inverse Fourier transformation. PSFNet was implemented with 5 cascades, K=5. The weights of the SG, SS, and DC blocks were tied across cascades to limit model complexity [27]. The only exceptions were the fusion coefficients that determine the relative weighting of the SG and SS blocks at each stage (γ1, ..., γk, ..., γ5, η1, ..., ηk, ..., η5). These fusion parameters were kept distinct across cascades. Coil combination on the recovered multi-coil images was performed using sensitivity maps estimated via ESPIRiT [71].
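As an illustration of the Tikhonov-regularized self-regression mentioned above, the sketch below fits a w x w interpolation kernel that predicts each calibration sample from its k-space neighborhood. The patch-extraction loop and variable names are our assumptions and not the SPIRiT implementation of [11]:

```python
import numpy as np

def estimate_kernel(calib, w=9, kappa=1e-2):
    """Fit a w x w kernel by Tikhonov-regularized self-regression on calibration data.

    calib : fully-sampled central k-space region (2D complex array)
    w     : kernel size; kappa : Tikhonov regularization weight
    """
    H, W = calib.shape
    pad = w // 2
    rows, targets = [], []
    for i in range(pad, H - pad):
        for j in range(pad, W - pad):
            patch = calib[i - pad:i + pad + 1, j - pad:j + pad + 1].copy().ravel()
            patch[w * w // 2] = 0.0          # exclude the center sample itself
            rows.append(patch)
            targets.append(calib[i, j])
    A = np.array(rows)                        # k-space neighborhoods
    b = np.array(targets)                     # samples to be predicted
    # Regularized normal equations: (A^H A + kappa I) theta = A^H b
    theta = np.linalg.solve(A.conj().T @ A + kappa * np.eye(A.shape[1]),
                            A.conj().T @ b)
    return theta.reshape(w, w)
```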
B. MRI Dataset

Experimental demonstrations were performed using brain MRI scans from the NYU fastMRI database [72]. Here, contrast-enhanced T1-weighted (cT1-weighted) and T2-weighted acquisitions were considered. The fastMRI dataset contains volumetric MRI data with varying image and coil dimensionality across subjects. Note that a central aim of this work was to systematically examine the learning capabilities of models for varying number of training samples. To minimize potential biases due to across-subject variability in MRI protocols, here we selected subjects with matching imaging matrix size and number of coils. To do this, we only selected subjects with at least 10 cross-sections, and only the central 10 cross-sections were retained in each subject. We further selected subjects with an in-plane matrix size of 256x320 for cT1 acquisitions, and of 288x384 for T2 acquisitions. Background regions in MRI data with higher dimensions were cropped. Lastly, we restricted our sample selection to subjects with at least 5 coil elements, and geometric coil compression [73] was applied to unify the number of coils to 5 in all subjects.

Fully-sampled acquisitions were retrospectively undersampled to achieve acceleration rates of R=4x and 8x. Random undersampling patterns were designed via either a bi-variate normal density function peaking at the center of k-space, or a uniform density function across k-space. The standard deviation of the normal density function was adjusted to maintain the expected value of R across k-space. The fully-sampled calibration region spanned a 40x40 window in central k-space.
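A sketch of how a variable-density random sampling mask of the kind described above could be generated: sampling probabilities follow a bivariate normal density peaking at the center of k-space, scaled so that the expected number of samples matches the target acceleration, with the calibration window kept fully sampled. Parameter names and the clipping step are illustrative assumptions.

```python
import numpy as np

def vd_mask(shape, R=4, sigma=0.25, calib=40, seed=0):
    """Variable-density random undersampling mask at (approximately) acceleration R."""
    rng = np.random.default_rng(seed)
    H, W = shape
    ky, kx = np.meshgrid(np.linspace(-0.5, 0.5, H),
                         np.linspace(-0.5, 0.5, W), indexing="ij")
    prob = np.exp(-(ky**2 + kx**2) / (2 * sigma**2))   # bivariate normal density
    prob *= (H * W / R) / prob.sum()                    # expected sample count = HW/R
    mask = rng.random(shape) < np.clip(prob, 0.0, 1.0)
    cy, cx = H // 2, W // 2
    mask[cy - calib // 2:cy + calib // 2, cx - calib // 2:cx + calib // 2] = True
    return mask
```

For example, vd_mask((256, 320), R=4) produces a roughly four-fold accelerated pattern with a 40x40 fully-sampled center; the realized rate deviates slightly from R because of the clipping and the forced calibration block.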
C. Competing Methods

PSFNet was compared against several state-of-the-art approaches including SG methods, SS methods, and traditional PI reconstructions. For methods containing SG priors, both supervised and unsupervised variants were implemented.

PSFNet: A supervised variant of PSFNet was trained using paired sets of undersampled and fully-sampled acquisitions.

PSFNetUS: An unsupervised variant of PSFNet was implemented using self-supervision based on only undersampled training data. Acquired data were split into two non-overlapping sets, where 40% of samples were reserved for evaluating the training loss and 60% of samples were reserved to enforce DC [64].
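A sketch of the 40%/60% split of acquired k-space locations used for this kind of self-supervision (and for MoDLUS below), where one disjoint subset defines the training loss and the remainder enforces DC; this simplified masking routine is our illustration, not the reference implementation of [64].

```python
import numpy as np

def split_acquired_mask(mask, loss_frac=0.4, seed=0):
    """Split acquired k-space locations into disjoint loss and DC masks."""
    rng = np.random.default_rng(seed)
    acquired = np.flatnonzero(mask)                       # indices of acquired samples
    loss_idx = rng.choice(acquired, size=int(loss_frac * acquired.size), replace=False)
    loss_mask = np.zeros(mask.size, dtype=bool)
    loss_mask[loss_idx] = True
    loss_mask = loss_mask.reshape(mask.shape)             # 40%: evaluates the loss
    dc_mask = mask & ~loss_mask                           # remaining 60%: enforces DC
    return loss_mask, dc_mask
```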
MoDL: A supervised SG method based on an unrolled architecture with tied weights across cascades was used [27]. MoDL serially interleaves SG and DC blocks. The number of cascades and the structure of SG and DC blocks were identical to those in PSFNet.

MoDLUS: An unsupervised variant of MoDL was implemented using self-supervision. A 40%-60% split was performed on acquired data to evaluate the training loss and enforce data consistency, respectively [64].

sRAKI-RNN: An SS method was implemented based on the MoDL architecture [67]. Learning was performed to minimize DC loss on the fully-sampled calibration region. Calibration data were randomly split with 75% of samples used to define the training loss and 25% of samples reserved to enforce DC. Multiple input-output pairs were produced for a single test sample by utilizing this split.

SPIRiT: A traditional PI reconstruction was performed using the SPIRiT method [11]. Reconstruction parameters including the regularization weight for kernel estimation (κ), kernel size (w), and the number of iterations (Niter) were independently optimized for each reconstruction task via cross-validation.

SPARK: An SS method was used to correct residual errors from an initial SPIRiT reconstruction [17]. Learning was performed to minimize DC loss on the calibration region. The learned SS prior was then used to correct residual errors in the remainder of k-space.
D. Optimization Procedures

For all methods, hyperparameter selection was performed via cross-validation on a three-way split of data across subjects. There was no overlap among training, validation and test sets in terms of subjects. Data from 10 subjects were reserved for validation, and data from a separate set of 40 subjects were reserved for testing. The number of subjects in the training set was varied from 1 to 50. Hyperparameters that maximized peak signal-to-noise ratio (PSNR) on the validation set were selected for each method.

Training was performed via the Adam optimizer with learning rate ζ=10⁻⁴, β1=0.90 and β2=0.99 [74]. All deep learning methods were trained to minimize a hybrid ℓ1-ℓ2-norm loss between recovered and target data (e.g., between reconstructed and ground-truth images for PSFNet, between recovered and acquired k-space samples for PSFNetUS) [64].
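The hybrid ℓ1-ℓ2 loss can be sketched as a weighted sum of the two norms of the residual; the equal weighting used here is an assumption, as the text does not state the relative weights.

```python
import numpy as np

def hybrid_l1_l2_loss(pred, target, alpha=0.5):
    """Hybrid l1-l2 loss between recovered and target data (images or k-space samples)."""
    diff = np.abs(pred - target)
    return alpha * diff.sum() + (1.0 - alpha) * np.sqrt((diff ** 2).sum())
```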
For PSFNet and MoDL, the selected number of epochs was 200; batch size was set to 2 for limited numbers of training samples (Nsamples < 10), and to 5 otherwise. In DC blocks, λ = ∞ was used to enforce strict data consistency. For PSFNet and SPIRiT, the kernel width (w) and regularization parameter (κ) values were set as (κ, w) = (10⁻², 9) at R=4 and (10⁻², 9) at R=8 for cT1-weighted reconstructions, and as (10⁰, 17) at R=4 and (10⁻², 17) at R=8 for T2-weighted reconstructions. For SPIRiT, the number of iterations Niter was set as 13 at R=4 and 27 at R=8 for cT1-weighted reconstructions, and 20 at R=4 and 38 at R=8 for T2-weighted reconstructions. For sRAKI-RNN, the selected number of epochs was 500 and batch size was set to 32. All other optimization procedures were identical to MoDL. For SPARK, network architecture and training procedures were adopted from [17], except for the number of epochs (Nepoch) and learning rate (ζ), which were optimized on the validation set as (Nepoch, ζ) = (100, 10⁻²) for cT1-weighted reconstructions, and (Nepoch, ζ) = (250, 10⁻³) for T2-weighted reconstructions.

All competing methods were executed on an NVidia RTX 3090 GPU, and models were coded in TensorFlow except for SPARK, which was implemented in PyTorch. SPARK was implemented using the toolbox at https://github.com/YaminArefeen/spark_mrm_2021. The code to implement PSFNet will be made publicly available at https://github.com/icon-lab/PSFNet upon publication.

Fig. 2: Average PSNR across test subjects for (a) cT1- and (b) T2-weighted image reconstructions at R=4x. Model training was performed for varying number of training samples (Nsamples, lower x-axis) and thereby training subjects (Nsubjects, upper x-axis). Results are shown for SPIRiT, SPARK, sRAKI-RNN, MoDL and PSFNet.
E. Performance Metrics

Performance assessments for reconstruction methods were carried out by visual observations and quantitative metrics. PSNR and structural similarity index (SSIM) were used for quantitative evaluation. For each method, metrics were computed on coil-combined images from the reconstruction and from the fully-sampled ground-truth acquisition. Statistical differences between competing methods were examined via non-parametric Wilcoxon signed-rank tests.
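For reference, PSNR between a coil-combined reconstruction and the fully-sampled ground truth can be computed as below; taking magnitude images and using the maximum of the reference as the peak value are our assumptions about the convention.

```python
import numpy as np

def psnr(recon, ref):
    """Peak signal-to-noise ratio in dB between magnitude images."""
    recon, ref = np.abs(recon), np.abs(ref)
    mse = np.mean((recon - ref) ** 2)
    return 10.0 * np.log10(ref.max() ** 2 / mse)
```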
F. Experiments

Several different experiments were conducted to systematically examine the performance of competing methods. Assessments aimed to investigate reconstruction performance under low training-data regimes, generalization performance in case of mismatch between training and testing domains, contribution of the parallel-stream design to reconstruction performance, sensitivity to hyperparameter selection, performance in unsupervised learning, and computational complexity.

Performance in low-data regimes: Deep SG methods for MRI reconstruction typically suffer from suboptimal performance as the size of the training dataset is constrained. To systematically examine reconstruction performance, we trained supervised variants of PSFNet and MoDL while the number of training samples (Nsamples) was varied in the range [2-500] cross-sections. To attain a given number of samples, sequential selection was performed across subjects and across cross-sections within each subject. Thus, the number of unique subjects included in the training set roughly corresponded to Nsamples/10 (since there were 10 cross-sections per subject). SS reconstructions were also performed with sRAKI-RNN, SPIRiT and SPARK. In the absence of fully-sampled ground truth data to guide the learning of the prior, unsupervised training of deep reconstruction models may prove relatively more difficult compared to supervised training. In turn, this may elevate requirements on training datasets for unsupervised models. To examine data efficiency for unsupervised training, we compared the reconstruction performance of PSFNetUS and MoDLUS as Nsamples was varied in the range of [2-500] cross-sections. Comparisons were also provided against sRAKI-RNN, SPIRiT and SPARK.

Generalization performance: Deep reconstruction models can suffer from suboptimal generalization when the MRI data distribution shows substantial variation between the training and testing domains. To examine generalizability, PSFNet models were trained on data from a source domain and tested on data from a different target domain. The domain-transferred models were then compared to models trained and tested directly in the target domain. Three different factors were altered to induce domain variation: tissue contrast, undersampling pattern, and acceleration rate. First, the capability to generalize to different tissue contrasts was evaluated. Models were trained on data from a source contrast and tested on data from a different target contrast. Domain-transferred models were compared to target-domain models trained on data from the target contrast. Next, the capability to generalize to different undersampling patterns was assessed. Models were trained on data undersampled with variable-density patterns and tested on data undersampled with uniform-density patterns. Domain-transferred models were compared to target-domain models trained on uniformly undersampled data. Lastly, the capability to generalize to different acceleration rates was examined. Models were trained on acquisitions accelerated at R=4x and tested on acquisitions accelerated at R=8x. Domain-transferred models were compared to target-domain models trained at R=8x.

Sensitivity to hyperparameters: SS priors are learned from individual test scans as opposed to SG priors trained on larger training datasets. Thus, SS priors might show elevated sensitivity to hyperparameter selection. We assessed the reliability of reconstruction performance against suboptimal hyperparameter selection for SS priors. For this purpose, analyses were conducted on SPIRiT, SPARK and PSFNet that embody SS methods to perform linear reconstructions in k-space. The set of hyperparameters examined included regularization parameters for kernel estimation (κ) and kernel size (w). Separate models were trained using κ in the range [10⁻³-10⁰] and w in the range [5-17].

Computational complexity: Finally, we assessed the computational complexity of competing methods. For each method, training and inference times were measured for a single subject with 10 cross-sections. Each cross-section had an imaging matrix size of 256x320 and contained data from 5 coils. For all methods including SS priors, hyperparameters optimized for cT1-weighted reconstructions at R=4 were used.

Ablation analysis: To assess the contribution of the parallel-stream design in PSFNet, a conventional unrolled variant of PSFNet was formed, named as PSFNetSerial. PSFNetSerial combined the SG and SS priors via serial projections as described in Eq. 10. Modeling procedures and the design of SG and SS blocks were kept identical between PSFNet and PSFNetSerial for fair comparison. Performance was assessed as Nsamples was varied in the range of [2-500] cross-sections.

Fig. 3: cT1-weighted image reconstructions at R=4x via SPIRiT, SPARK, sRAKI-RNN, MoDL, and PSFNet along with the zero-filled reconstruction (ZF) and the reference image obtained from the fully-sampled acquisition. Error maps for each method are shown in the bottom row. MoDL and PSFNet were trained on 10 cross-sections from a single subject. SPIRiT, SPARK and sRAKI-RNN directly performed inference on test data without a priori model training. PSFNet shows superior performance to competing methods in terms of residual reconstruction errors.

Fig. 4: T2-weighted image reconstructions at R=4x via SPIRiT, SPARK, sRAKI-RNN, MoDL, and PSFNet along with the zero-filled reconstruction (ZF) and the reference image obtained from the fully-sampled acquisition. Error maps for each method are shown in the bottom row. MoDL and PSFNet were trained on 10 cross-sections from a single subject. SPIRiT, SPARK and sRAKI-RNN directly performed inference on test data without a priori model training. PSFNet shows superior performance to competing methods in terms of residual reconstruction errors.
IV. RESULTS

A. Performance in Low-Data Regimes

Common SG methods for MRI reconstruction are based on deep networks that require copious amounts of training data, so performance can substantially decline on limited training sets [28], [59]. In contrast, PSFNet leverages an SG prior to concurrently reconstruct an image along with an SS prior. Therefore, we reasoned that its performance should scale favorably under low-data regimes compared to SG methods. We also reasoned that PSFNet should yield elevated performance compared to SS methods due to residual corrections from its SG prior. To test these predictions, we trained supervised variants of PSFNet and MoDL along with SPIRiT, sRAKI-RNN, and SPARK while the number of training samples (Nsamples) was systematically varied. Figure 2 displays PSNR performance for cT1-weighted and T2-weighted image reconstruction as a function of Nsamples. PSFNet outperforms the scan-general MoDL method for all values of Nsamples (p < 0.05). As expected, performance benefits with PSFNet become more prominent towards lower values of Nsamples. PSFNet also outperforms traditional SPIRiT and scan-specific sRAKI-RNN and SPARK methods broadly across the examined range of Nsamples (p < 0.05). Note that while MoDL requires Nsamples = 30 (3 subjects) to offer on par performance to SS methods, PSFNet yields superior performance with as few as Nsamples = 2. Representative reconstructions for cT1- and T2-weighted images are depicted in Figures 3 and 4, where Nsamples = 10 from a single subject were used for training. PSFNet yields lower reconstruction errors compared to all other methods in this low-data regime, where competing methods either show elevated noise or blurring.

Naturally, the performance of PSFNet increases as more training samples are available. Since the SS prior is independently learned for individual samples, it should not elicit systematic performance variations depending on Nsamples. Thus, the performance gains can be attributed to improved learning of the SG prior. In turn, we predicted that PSFNet would put more emphasis on its SG stream as its reliability increases. To examine this issue, we inspected the weightings of the SG (γ) and SS (η) streams as the training set size was varied. Figure 5 displays weightings at the last cascade as a function of Nsamples. For lower values of Nsamples where the quality of the SG prior is relatively limited, the SG and SS priors are almost equally weighted. In contrast, as the learning of the SG prior improves with higher Nsamples, the emphasis on the SG prior increases while the SS prior is less heavily weighted.

Fig. 5: Weighting of the SG (γ) and SS (η) blocks in the final cascade of PSFNet. Weights were averaged across models trained for cT1- and T2-weighted reconstructions at R=4x. Model training was performed for varying number of training samples (Nsamples, lower x-axis) and thereby training subjects (Nsubjects, upper x-axis). Both blocks are equally weighted with very limited training data. As Nsamples increases, the weighting of the SG prior becomes more dominant over the weighting of the SS prior.

We then questioned whether the performance benefits of PSFNet are also apparent during unsupervised training of deep network models. For this purpose, unsupervised variants PSFNetUS and MoDLUS were trained via self-supervision [64]. PSFNetUS was compared against MoDLUS, SPIRiT, sRAKI-RNN, and SPARK while the number of training samples (Nsamples) was systematically varied. Figure 6 displays PSNR performance for cT1-weighted and T2-weighted image reconstruction as a function of Nsamples. Similar to the supervised setting, PSFNetUS outperforms MoDLUS for all values of Nsamples (p < 0.05), and the performance benefits are more noticeable at lower Nsamples. In this case, however, MoDLUS is unable to reach the performance of the best performing SS method (SPARK) even at Nsamples = 500. In contrast, PSFNetUS starts outperforming SPARK with approximately Nsamples = 50 (5 subjects). The enhanced reconstruction quality with PSFNetUS is corroborated in representative reconstructions for cT1- and T2-weighted images depicted in Figures 7 and 8, where Nsamples = 100 were used for training. Taken together, these results indicate that the data-efficient nature of PSFNet facilitates the training of both supervised and unsupervised MRI reconstruction models.

Fig. 6: Average PSNR across test subjects for (a) cT1- and (b) T2-weighted image reconstructions at R=4x. Model training was performed for varying number of training samples (Nsamples, lower x-axis) and thereby training subjects (Nsubjects, upper x-axis). Results are shown for SPIRiT, SPARK, sRAKI-RNN, MoDLUS and PSFNetUS.

Fig. 7: cT1-weighted image reconstructions at R=4x via SPIRiT, SPARK, sRAKI-RNN, MoDLUS, and PSFNetUS along with the zero-filled reconstruction (ZF) and the reference image obtained from the fully-sampled acquisition. Error maps for each method are shown in the bottom row. MoDLUS and PSFNetUS were trained on 100 cross-sections (from 10 subjects). SPIRiT, SPARK and sRAKI-RNN directly performed inference on test data without a priori model training. PSFNetUS shows superior performance to competing methods in terms of residual reconstruction errors.

Fig. 8: T2-weighted image reconstructions at R=4x via SPIRiT, SPARK, sRAKI-RNN, MoDLUS, and PSFNetUS along with the zero-filled reconstruction (ZF) and the reference image obtained from the fully-sampled acquisition. Error maps for each method are shown in the bottom row. MoDLUS and PSFNetUS were trained on 100 cross-sections (from 10 subjects). SPIRiT, SPARK and sRAKI-RNN directly performed inference on test data without a priori model training. PSFNetUS shows superior performance to competing methods in terms of residual reconstruction errors.
954
B. Generalization Performance

An important advantage of SS priors is that they allow model adaptation to individual test samples, thereby promising enhanced performance in out-of-domain reconstructions [22]. Yet, SG priors with fixed parameters might show relatively limited generalizability during inference [23], [75]. To assess generalization performance, we introduced domain variations by altering three experimental factors: tissue contrast, undersampling pattern, and acceleration rate. For methods comprising SG components, we built both target-domain models that were trained in the target domain and domain-transferred models that were trained in a non-target domain. We then compared the reconstruction performance of the two models in the target domain.

First, we examined generalization performance when the tissue contrast varied between training and testing domains (e.g., trained on cT1, tested on T2). Table I lists performance metrics for competing methods with Nsamples = 500. While performance losses are incurred for the domain-transferred PSFNet-DT and MoDL-DT models that contain SG components, these losses are modest. On average, MoDL-DT shows a loss of 0.3 dB PSNR and 0.1% SSIM (p < 0.05), and PSFNet-DT shows a loss of 0.2 dB PSNR and 0.1% SSIM (p < 0.05). Note that PSFNet-DT still outperforms the closest competing SS method by 2.2 dB PSNR and 1.8% SSIM (p < 0.05).

Second, we examined generalization performance when models were trained with variable-density and tested on uniform-density undersampling patterns. Table II lists performance metrics for competing methods. On average across tissue contrasts, MoDL-DT suffers a notable performance loss of 3.6 dB PSNR and 2.5% SSIM (p < 0.05). In contrast, PSFNet-DT shows a relatively limited loss of 0.4 dB PSNR and 0.2% SSIM (p < 0.05). Note that PSFNet-DT again outperforms the closest competing SS method by 3.4 dB PSNR and 3.7% SSIM (p < 0.05).

Third, we examined generalization performance when models were trained at R=4x and tested at R=8x. Table III lists performance metrics for competing methods.
TABLE I: Generalization across tissue contrasts. PSNR and SSIM values (mean±standard error) across test subjects. Results are shown for scan-specific models (SPIRiT, SPARK, sRAKI-RNN), target-domain models (MoDL, PSFNet) and domain-transferred models (MoDL-DT, PSFNet-DT) at R=4x. The tissue contrast in the target domain is listed in the left-most column (cT1 or T2); domain-transferred models were trained for the non-target tissue contrast.

            SPIRiT      SPARK       sRAKI-RNN   MoDL        MoDL-DT     PSFNet      PSFNet-DT
PSNR  cT1   37.6±1.5    37.6±1.5    36.8±1.3    38.5±1.5    38.2±1.5    39.9±1.7    39.4±1.6
      T2    35.8±1.0    36.5±1.0    35.2±1.1    37.9±1.0    37.5±1.1    39.0±1.0    39.0±0.9
SSIM  cT1   93.1±1.5    93.3±1.4    93.8±1.0    95.1±1.0    94.8±1.1    95.8±1.0    95.6±1.0
      T2    90.8±1.2    93.1±1.0    94.9±0.6    96.2±0.5    96.2±0.5    96.7±0.4    96.8±0.4
TABLE II: Generalization across undersampling patterns. PSNR and SSIM values (mean±standard error) across test subjects. Results are shown for scan-specific models (SPIRiT, SPARK, sRAKI-RNN), target-domain models (MoDL, PSFNet) and domain-transferred models (MoDL-DT, PSFNet-DT) at R=4x. Domain-transferred models were trained with variable-density undersampling and tested on uniform-density undersampling. Target-domain models were trained and tested with uniform-density undersampling.

            SPIRiT      SPARK       sRAKI-RNN   MoDL        MoDL-DT     PSFNet      PSFNet-DT
PSNR  cT1   37.1±1.8    37.1±1.7    33.6±1.4    37.0±1.7    33.6±1.8    40.2±1.6    39.9±1.6
      T2    35.1±1.3    35.6±1.3    31.6±1.5    37.0±1.1    33.2±1.2    40.2±1.1    39.7±1.2
SSIM  cT1   92.9±1.5    93.0±1.5    91.2±1.5    93.4±1.3    91.2±2.0    95.9±1.2    95.6±1.2
      T2    90.6±1.5    92.1±1.5    91.5±1.2    95.6±0.7    92.7±1.1    97.1±0.6    96.9±0.6
On average across tissue contrasts, MoDL-DT suffers a notable performance loss of 1.0 dB PSNR while performing slightly better in SSIM by 0.2% (p < 0.05), whereas PSFNet-DT shows a lower loss of 0.6 dB PSNR (p < 0.05) and performs similarly in SSIM (p > 0.05). PSFNet-DT outperforms the closest competing SS method by 1.2 dB PSNR and 1.9% SSIM (p < 0.05). Taken together, these results clearly suggest that the SS prior in PSFNet contributes to its improved generalization performance over the scan-general MoDL method, while the SG prior in PSFNet enables it to outperform competing SS methods.
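For reference, the PSNR and SSIM metrics reported in Tables I-III can be computed as in the following minimal sketch using scikit-image; the normalization of the magnitude images by the reference maximum is an assumption and may differ from the exact evaluation protocol used here.

import numpy as np
from skimage.metrics import peak_signal_noise_ratio, structural_similarity

def evaluate(reference: np.ndarray, reconstruction: np.ndarray):
    """Return (PSNR in dB, SSIM in %) for a pair of magnitude images."""
    scale = np.abs(reference).max()          # assumed normalization
    ref = np.abs(reference) / scale
    rec = np.abs(reconstruction) / scale
    psnr = peak_signal_noise_ratio(ref, rec, data_range=1.0)
    ssim = structural_similarity(ref, rec, data_range=1.0) * 100.0
    return psnr, ssim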
C. Sensitivity to Hyperparameters

Parameters of deep networks that implement SS priors are to be learned from a single test sample, so the resultant models can show elevated sensitivity to the selection of hyperparameters compared to SG priors learned from a collection of training samples.
TABLE III: Generalization across acceleration rates. PSNR and SSIM values (mean±standard error) across test subjects. Results are shown for scan-specific models (SPIRiT, SPARK, sRAKI-RNN), target-domain models (MoDL, PSFNet) and domain-transferred models (MoDL-DT, PSFNet-DT). Domain-transferred models were trained at R=4x and tested at R=8x. Target-domain models were trained and tested at R=8x.

            SPIRiT      SPARK       sRAKI-RNN   MoDL        MoDL-DT     PSFNet      PSFNet-DT
PSNR  cT1   34.7±1.5    34.8±1.5    34.3±1.5    35.3±1.4    34.5±1.7    36.5±1.5    36.2±1.5
      T2    33.6±1.0    33.7±1.0    32.6±0.9    34.6±1.0    33.4±1.2    35.6±1.1    34.6±1.2
SSIM  cT1   89.8±1.9    90.8±1.6    91.4±1.4    92.1±1.5    92.2±1.4    93.3±1.4    93.3±1.4
      T2    89.0±1.3    90.1±1.1    92.7±0.9    93.5±0.8    93.7±0.8    94.6±0.7    94.5±0.7
Thus, we investigated the sensitivity of PSFNet to key hyperparameters of its SS prior. The SPIRiT, SPARK and PSFNet methods all embody a linear k-space reconstruction, so the relevant hyperparameters are the regularization weight and the width of the convolution kernel. Performance was evaluated for models trained in the low-data regime (i.e., Nsamples = 10, 1 subject) for varying hyperparameter values.

Figure 9 displays PSNR measurements for SPIRiT, SPARK and PSFNet across κ in the range 10^-3 to 10^0. While the performance of SPIRiT and SPARK is notably influenced by κ, PSFNet is minimally affected by sub-optimal selection. On average across contrasts, the difference between the maximum and minimum PSNR values is 8.4 dB for SPIRiT, 4.5 dB for SPARK, and a lower 0.7 dB for PSFNet. Note that PSFNet outperforms competing methods across the entire range of κ (p < 0.05). Figure 10 shows PSNR measurements for competing methods across w in the range 5-17. In this case, all methods show relatively limited sensitivity to the selection of w. On average across contrasts, the difference between the maximum and minimum PSNR values is 1.5 dB for SPIRiT, 0.5 dB for SPARK, and 0.2 dB for PSFNet. Again, PSFNet outperforms competing methods across the entire range of w (p < 0.05). Overall, our results indicate that PSFNet offers improved reliability against sub-optimal hyperparameter selection compared to competing SS methods.
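The sensitivity analysis above amounts to a simple sweep over κ and w. A minimal sketch is given below; `reconstruct_fn` is a hypothetical stand-in for any of the compared reconstructions (not a released API), and PSNR is computed against the fully-sampled reference with an assumed magnitude normalization.

import numpy as np
from skimage.metrics import peak_signal_noise_ratio

def sweep(reconstruct_fn, kspace, reference,
          kappas=np.logspace(-3, 0, 7), widths=range(5, 19, 2)):
    """Return a dict mapping (kappa, w) -> PSNR in dB of the reconstruction."""
    scale = np.abs(reference).max()
    ref = np.abs(reference) / scale
    scores = {}
    for kappa in kappas:
        for w in widths:
            rec = np.abs(reconstruct_fn(kspace, reg_weight=kappa, kernel_width=w)) / scale
            scores[(kappa, w)] = peak_signal_noise_ratio(ref, rec, data_range=1.0)
    return scores

# Sensitivity can then be summarized as the spread between best and worst settings:
# spread_db = max(scores.values()) - min(scores.values())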
D. Computational Complexity

Next, we assessed the computational complexity of competing methods. Table IV lists the training times of the methods with SG priors, MoDL and PSFNet; the remaining SS-based methods do not involve a pre-training step. As it involves learning an SS prior on each training sample, PSFNet yields elevated training time compared to MoDL. In return, however, it offers enhanced generalization performance and data-efficient learning. Table IV also lists the inference times of SPIRiT, SPARK, sRAKI-RNN, MoDL and PSFNet. MoDL and PSFNet, which employ SG priors with fixed weights during inference, offer fast run times. In contrast, SPARK and
Fig. 9: PSNR measurements were performed on recovered cT1- and T2-weighted images at R=4x. Bar plots in blue color show average PSNR across κ ∈ 10^-3 to 10^1 (i.e., the regularization parameter for kernel estimation). Error bars denote the 90% interval across κ. Bar plots in red color show PSNR for methods that do not depend on the value of κ.

Fig. 10: PSNR measurements were performed on recovered cT1- and T2-weighted images at R=4x. Bar plots in blue color show the average PSNR across w ∈ 5-17 (i.e., the kernel size). Error bars denote the 90% interval across w. Bar plots in red color show PSNR for methods that do not depend on the value of w.
sRAKI-RNN, which involve SS priors learned on individual test samples, have a high computational burden. Although PSFNet also embodies an SS prior, it uses a relatively lightweight linear prior as opposed to the nonlinear priors in competing SS methods. Therefore, PSFNet benefits from data-efficient learning while maintaining computationally-efficient inference.
E. Ablation Analysis

To demonstrate the value of the parallel-stream fusion strategy in PSFNet over conventional unrolling, PSFNet was compared against a variant model, PSFNetSerial, that combines SS and SG priors through serially alternated projections. Separate models were trained with the number of training samples in the range Nsamples = 2-500. Performance in cT1- and T2-weighted image reconstruction is displayed in Figure 11. PSFNet significantly improves reconstruction performance over PSFNetSerial across the entire range of Nsamples considered (p < 0.05), and the benefits grow stronger for smaller training sets. On average across contrasts for Nsamples < 10, PSFNet
TABLE IV: Computational complexity of competing methods. Training and inference times are for data from a single subject, with 10 cross-sections, an imaging matrix size of 256x320 and 5 coils. Run times are listed for SPIRiT, SPARK, sRAKI-RNN, MoDL, and PSFNet.

               SPIRiT    SPARK    sRAKI-RNN   MoDL    PSFNet
Training (s)   -         -        -           132     337
Inference (s)  0.85      23.35    285.00      0.25    1.13
Fig. 11: Average (a) PSNR and (b) SSIM values for cT1- and T2-weighted image reconstructions at R=4x. Model training was performed for a varying number of training samples (Nsamples, lower x-axis) and thereby training subjects (Nsubjects, upper x-axis). Results are shown for PSFNet and PSFNetSerial.

outperforms PSFNetSerial by 1.8 dB PSNR and 0.6% SSIM (p < 0.05). These results indicate that the parallel-stream fusion of SG and SS priors in PSFNet is superior to the serial projections in conventional unrolling.
V. DISCUSSION AND CONCLUSION

In this study, we introduced PSFNet for data-efficient training of deep reconstruction models in accelerated MRI. PSFNet synergistically fuses SS and SG priors in a parallel-stream architecture. The linear SS prior improves learning efficiency while maintaining a relatively low computational footprint, whereas the nonlinear SG prior enables improved reconstruction performance. For both supervised and unsupervised training setups, the resulting model substantially reduces dependence on the availability of large MRI datasets. Furthermore, it achieves inference times competitive with SG methods, and
reliably generalizes across tissue contrasts, sampling patterns and acceleration rates.

Several prominent approaches have been introduced in the literature to address the training requirements of deep models based on SG priors. One approach is to pre-train models on readily available datasets from a separate source domain and then fine-tune on several tens of samples from the target domain [28], [59], or else perform SS fine-tuning [76]. This transfer-learning approach relaxes the domain requirements for training datasets. However, domain-transferred models might be suboptimal when training and testing data distributions are divergent. In such cases, additional training for domain alignment might be necessary to mitigate performance losses. In contrast, PSFNet contains an SS prior that allows it to better generalize to out-of-domain data without further training. Another approach is to build unsupervised models to alleviate dependency on training datasets with paired undersampled, fully-sampled acquisitions. Model training can be performed either directly on undersampled acquisitions via self-supervision [64] or on unpaired sets of undersampled and fully-sampled acquisitions via cycle-consistent learning [77]. This approach can prove beneficial when fully-sampled acquisitions are costly to collect. Nonetheless, the resulting models still require relatively large datasets from tens of subjects during training [64]. Note that our experiments on self-supervised variants of PSFNet and MoDL suggest that unsupervised models can demand more data than their supervised counterparts. Therefore, the data-efficiency benefits of PSFNet might be particularly useful for unsupervised deep MRI reconstruction.
A fundamentally different framework to lower requirements on training datasets while offering improved generalizability is based on SS priors. In this case, learning can be performed directly on test data and models can be adapted to each scan [15], [17]. A group of studies have proposed SS methods based on relatively compact nonlinear models to facilitate learning during inference [15], [17], [18], [78]. However, because learning is performed in central k-space, these methods implicitly assume that local relationships among spatial frequency samples are largely invariant across k-space. While the SS prior in PSFNet also rests on a similar assumption, the SG component helps correct residual errors that can be introduced by this assumption. Another group of studies have alternatively adopted the deep image prior (DIP) approach to build SS methods [19], [20], [22], [23]. In DIP, unconditional deep network models that map latent variables onto images are used as native priors for MR images. The priors are learned by ensuring the consistency of reconstructed and acquired data across the entire k-space. Despite improved generalization, these relatively more complex models require increased inference times. In comparison, PSFNet provides faster inference since the weights of its SG prior are fixed and its SS prior involves a compact linear operator that is easier to learn.
A few independent studies on MRI have proposed approaches related to PSFNet by combining nonlinear and linear reconstructions [6], [17], [78]. The residual RAKI and SPARK methods initially perform a linear reconstruction and then use an SS method to correct residual errors by minimizing a DC loss in the calibration region [17], [78]. As local relationships among data samples might vary across k-space, the learned SS priors might be suboptimal. Moreover, these methods perform online learning of nonlinear SS priors, which introduces a relatively high computational burden. In contrast, PSFNet incorporates an SG prior to help improve reliability against sub-optimalities in the SS prior, and uses a linear SS prior for efficiency. Another related method is GrappaNet, which improves reconstruction performance by cascading GRAPPA and network-based nonlinear reconstruction steps [6]. While [6] intends to improve image quality, the main aim of our study is to improve practicality by lowering the training data requirements of deep models and improving domain generalizability without elevating inference times. Note that GrappaNet follows the conventional unrolling approach by performing serially alternated projections through linear and nonlinear reconstructions, which can lead to error propagation under low-data regimes [79]. In contrast, PSFNet maintains linear and nonlinear reconstructions as two parallel streams in its architecture, and learns to optimally fuse the information from the two streams.
The proposed method can be improved along several lines of technical development. First, to improve the capture of high-frequency information by the SG prior, an adversarial loss term along with a discriminator subnetwork can be included in PSFNet [80]. It remains to be demonstrated whether the data-efficiency benefits of PSFNet are apparent in adversarial training setups. Second, nonlinear activation functions can be included in the SS stream to improve the expressiveness of the SS prior [78]. While learning of nonlinear priors can elevate inference complexity, generalization performance might be further improved. Third, the expressiveness of both SS and SG priors might be enhanced by incorporating attention mechanisms as proposed in recent transformer models [81]. Fourth, multimodal image fusion approaches can improve performance when a repository of multimodal data is available [82], [83]. Lastly, the benefits of transfer learning and PSFNet can be aggregated by pre-training the SG prior on natural images to further lower requirements on training data.
VI. ACKNOWLEDGMENTS

This work was supported in part by a TUBA GEBIP 2015 fellowship, by a BAGEP 2017 fellowship, and by a TUBITAK 121E488 grant awarded to T. Çukur.
REFERENCES

[1] S. Bauer, R. Wiest, L.-P. Nolte, and M. Reyes, "A survey of mri-based medical image analysis for brain tumor studies," Physics in Medicine & Biology, vol. 58, no. 13, p. R97, 2013.
[2] A. Shoeibi, M. Khodatars, M. Jafari, N. Ghassemi, P. Moridian, R. Alizadehsani, S. H. Ling, A. Khosravi, H. Alinejad-Rokny, H. Lam, M. Fuller-Tyszkiewicz, U. R. Acharya, D. Anderson, Y. Zhang, and J. M. Gorriz, "Diagnosis of brain diseases in fusion of neuroimaging modalities using deep learning: A review," Information Fusion, vol. 93, pp. 85-117, 2023.
[3] M. Hu, X. Qian, S. Liu, A. J. Koh, K. Sim, X. Jiang, C. Guan, and J. H. Zhou, "Structural and diffusion mri based schizophrenia classification using 2d pretrained and 3d naive convolutional neural networks," Schizophrenia Research, vol. 243, pp. 330-341, 2022.
[4] K. R. M. Fernando and C. P. Tsokos, "Deep and statistical learning in biomedical imaging: State of the art in 3d mri brain tumor segmentation," Information Fusion, vol. 92, pp. 450-465, 2023.
[5] Z. Zhu, X. He, G. Qi, Y. Li, B. Cong, and Y. Liu, "Brain tumor segmentation based on the fusion of deep semantics and edge information in multimodal mri," Information Fusion, vol. 91, pp. 376-387, 2023.
[6] A. Sriram, J. Zbontar, T. Murrell, C. L. Zitnick, A. Defazio, and D. K. Sodickson, "GrappaNet: Combining parallel imaging with deep learning for multi-coil MRI reconstruction," in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2020, pp. 14303-14310.
[7] K. P. Pruessmann, M. Weiger, M. B. Scheidegger, and P. Boesiger, "SENSE: sensitivity encoding for fast MRI," Magnetic Resonance in Medicine, vol. 42, no. 5, pp. 952-62, 1999.
[8] M. A. Griswold, P. M. Jakob, R. M. Heidemann, M. Nittka, V. Jellus, J. Wang, B. Kiefer, and A. Haase, "Generalized autocalibrating partially parallel acquisitions (GRAPPA)," Magnetic Resonance in Medicine, vol. 47, no. 6, pp. 1202-1210, 2002.
[9] M. Lustig, D. Donoho, and J. M. Pauly, "Sparse MRI: The application of compressed sensing for rapid MR imaging," Magnetic Resonance in Medicine, vol. 58, no. 6, pp. 1182-1195, 2007.
[10] A. Majumdar, "Improving synthesis and analysis prior blind compressed sensing with low-rank constraints for dynamic mri reconstruction," Magnetic Resonance Imaging, vol. 33, no. 1, pp. 174-179, 2015.
[11] M. Lustig and J. M. Pauly, "SPIRiT: Iterative self-consistent parallel imaging reconstruction from arbitrary k-space," Magnetic Resonance in Medicine, vol. 64, no. 2, pp. 457-71, 2010.
[12] Y. Yang, J. Sun, H. Li, and Z. Xu, "ADMM-CSNet: A deep learning approach for image compressive sensing," IEEE Transactions on Pattern Analysis and Machine Intelligence, vol. 42, no. 3, pp. 521-538, 2020.
[13] J. Schlemper, J. Caballero, J. V. Hajnal, A. Price, and D. Rueckert, "A Deep Cascade of Convolutional Neural Networks for MR Image Reconstruction," in International Conference on Information Processing in Medical Imaging, 2017, pp. 647-658.
[14] K. Hammernik, T. Klatzer, E. Kobler, M. P. Recht, D. K. Sodickson, T. Pock, and F. Knoll, "Learning a variational network for reconstruction of accelerated MRI data," Magnetic Resonance in Medicine, vol. 79, no. 6, pp. 3055-3071, 2017.
[15] M. Akçakaya, S. Moeller, S. Weingärtner, and K. Uğurbil, "Scan-specific robust artificial-neural-networks for k-space interpolation (RAKI) reconstruction: Database-free deep learning for fast imaging," Magnetic Resonance in Medicine, vol. 81, no. 1, pp. 439-453, 2019.
[16] T. H. Kim, P. Garg, and J. P. Haldar, "LORAKI: Autocalibrated recurrent neural networks for autoregressive MRI reconstruction in k-space," arXiv preprint arXiv:1904.09390, 2019.
[17] Y. Arefeen, O. Beker, J. Cho, H. Yu, E. Adalsteinsson, and B. Bilgic, "Scan-specific artifact reduction in k-space (spark) neural networks synergize with physics-based reconstruction to accelerate mri," Magnetic Resonance in Medicine, vol. 87, no. 2, pp. 764-780, 2022.
[18] S. A. H. Hosseini, C. Zhang, S. Weingärtner, S. Moeller, M. Stuber, K. Ugurbil, and M. Akçakaya, "Accelerated coronary MRI with sRAKI: A database-free self-consistent neural network k-space reconstruction for arbitrary undersampling," PLOS ONE, vol. 15, no. 2, pp. 1-13, 2020.
[19] Y. Korkmaz, S. U. Dar, M. Yurt, M. Özbey, and T. Çukur, "Unsupervised mri reconstruction via zero-shot learned adversarial transformers," IEEE Transactions on Medical Imaging, vol. 41, no. 7, pp. 1747-1763, 2022.
[20] S. Arora, V. Roeloffs, and M. Lustig, "Untrained modified deep decoder for joint denoising and parallel imaging reconstruction," in Proceedings of the 28th Annual Meeting of the ISMRM, 2020, p. 3585.
[21] M. Z. Darestani and R. Heckel, "Accelerated mri with un-trained neural networks," IEEE Transactions on Computational Imaging, vol. 7, pp. 724-733, 2021.
[22] D. Narnhofer, K. Hammernik, F. Knoll, and T. Pock, "Inverse GANs for accelerated MRI reconstruction," in Proceedings of the SPIE, vol. 11138, 2019, pp. 381-392.
[23] K. C. Tezcan, C. F. Baumgartner, R. Luechinger, K. P. Pruessmann, and E. Konukoglu, "MR image reconstruction using deep density priors," IEEE Transactions on Medical Imaging, vol. 38, no. 7, pp. 1633-1642, 2019.
[24] Q. Liu, Q. Yang, H. Cheng, S. Wang, M. Zhang, and D. Liang, "Highly undersampled magnetic resonance imaging reconstruction using autoencoding priors," Magnetic Resonance in Medicine, vol. 83, no. 1, pp. 322-336, 2020.
[25] T. Eo, Y. Jun, T. Kim, J. Jang, H.-J. Lee, and D. Hwang, "KIKI-net: cross-domain convolutional neural networks for reconstructing undersampled magnetic resonance images," Magnetic Resonance in Medicine, vol. 80, no. 5, pp. 2188-2201, 2018.
[26] M. Mardani, E. Gong, J. Y. Cheng, S. Vasanawala, G. Zaharchuk, L. Xing, and J. M. Pauly, "Deep generative adversarial neural networks for compressive sensing MRI," IEEE Transactions on Medical Imaging, vol. 38, no. 1, pp. 167-179, 2019.
[27] H. K. Aggarwal, M. P. Mani, and M. Jacob, "MoDL: Model-Based deep learning architecture for inverse problems," IEEE Transactions on Medical Imaging, vol. 38, no. 2, pp. 394-405, 2019.
[28] S. U. H. Dar, M. Özbey, A. B. Çatlı, and T. Çukur, "A transfer-learning approach for accelerated MRI using deep neural networks," Magnetic Resonance in Medicine, vol. 84, no. 2, pp. 663-685, 2020.
[29] D. Lee, J. Yoo, S. Tak, and J. C. Ye, "Deep residual learning for accelerated MRI using magnitude and phase networks," IEEE Transactions on Biomedical Engineering, vol. 65, no. 9, pp. 1985-1995, 2018.
[30] P. Guo, J. M. J. Valanarasu, P. Wang, J. Zhou, S. Jiang, and V. M. Patel, "Over-and-under complete convolutional rnn for mri reconstruction," in International Conference on Medical Image Computing and Computer-Assisted Intervention. Springer, 2021, pp. 13-23.
[31] G. Yiasemis, J.-J. Sonke, C. Sánchez, and J. Teuwen, "Recurrent variational network: A deep learning inverse problem solver applied to the task of accelerated mri reconstruction," in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2022, pp. 732-741.
[32] R. Hou and F. Li, "Idpcnn: Iterative denoising and projecting cnn for mri reconstruction," Journal of Computational and Applied Mathematics, vol. 406, p. 113973, 2022.
[33] Z. Ramzi, G. Chaithya, J.-L. Starck, and P. Ciuciu, "Nc-pdnet: a density-compensated unrolled network for 2d and 3d non-cartesian mri reconstruction," IEEE Transactions on Medical Imaging, 2022.
[34] K. Kwon, D. Kim, and H. Park, "A parallel MR imaging method using multilayer perceptron," Medical Physics, vol. 44, no. 12, pp. 6209-6224, 2017.
[35] S. Wang, Z. Su, L. Ying, X. Peng, S. Zhu, F. Liang, D. Feng, and D. Liang, "Accelerating magnetic resonance imaging via deep learning," in IEEE 13th International Symposium on Biomedical Imaging (ISBI), 2016, pp. 514-517.
[36] J. C. Ye, Y. Han, and E. Cha, "Deep convolutional framelets: A general deep learning framework for inverse problems," SIAM Journal on Imaging Sciences, vol. 11, no. 2, pp. 991-1048, 2018.
[37] J. Yoon, E. Gong, I. Chatnuntawech, B. Bilgic, J. Lee, W. Jung, J. Ko, H. Jung, K. Setsompop, G. Zaharchuk, E. Y. Kim, J. Pauly, and J. Lee, "Quantitative susceptibility mapping using deep neural network: QSMnet," NeuroImage, vol. 179, pp. 199-206, 2018.
[38] C. M. Hyun, H. P. Kim, S. M. Lee, S. Lee, and J. K. Seo, "Deep learning for undersampled MRI reconstruction," Physics in Medicine and Biology, vol. 63, no. 13, p. 135007, 2018.
[39] A. Hauptmann, S. Arridge, F. Lucka, V. Muthurangu, and J. A. Steeden, "Real-time cardiovascular MR with spatio-temporal artifact suppression using deep learning - proof of concept in congenital heart disease," Magnetic Resonance in Medicine, vol. 81, no. 2, pp. 1143-1156, 2019.
[40] S. A. H. Hosseini, B. Yaman, S. Moeller, M. Hong, and M. Akçakaya, "Dense recurrent neural networks for accelerated MRI: History-cognizant unrolling of optimization algorithms," IEEE Journal of Selected Topics in Signal Processing, vol. 14, no. 6, pp. 1280-1291, 2020.
[41] C. Qin, J. Schlemper, J. Caballero, A. N. Price, J. V. Hajnal, and D. Rueckert, "Convolutional recurrent neural networks for dynamic MR image reconstruction," IEEE Transactions on Medical Imaging, vol. 38, no. 1, pp. 280-290, 2019.
[42] T. M. Quan, T. Nguyen-Duc, and W.-K. Jeong, "Compressed sensing MRI reconstruction with cyclic loss in generative adversarial networks," IEEE Transactions on Medical Imaging, vol. 37, no. 6, pp. 1488-1497, 2018.
[43] S. U. Dar, M. Yurt, M. Shahdloo, M. E. Ildız, B. Tınaz, and T. Çukur, "Prior-guided image reconstruction for accelerated multi-contrast MRI via generative adversarial networks," IEEE Journal of Selected Topics in Signal Processing, vol. 14, no. 6, pp. 1072-1087, 2020.
[44] Y. Chen, D. Firmin, and G. Yang, "Wavelet improved GAN for MRI reconstruction," in Proceedings of SPIE, Medical Imaging 2021: Physics of Medical Imaging, vol. 11595, 2021, p. 1159513.
[45] G. Elmas, S. U. Dar, Y. Korkmaz, E. Ceyani, B. Susam, M. Özbey, S. Avestimehr, and T. Çukur, "Federated learning of generative image priors for mri reconstruction," IEEE Transactions on Medical Imaging, 2022.
[46] M. Yaqub, F. Jinchao, S. Ahmed, K. Arshid, M. A. Bilal, M. P. Akhter, and M. S. Zia, "Gan-tl: Generative adversarial networks with transfer learning for mri reconstruction," Applied Sciences, vol. 12, no. 17, p. 8841, 2022.
[47] Y. Korkmaz, M. Özbey, and T. Çukur, "Mri reconstruction with conditional adversarial transformers," in International Workshop on Machine Learning for Medical Image Reconstruction. Springer, 2022, pp. 62-71.
[48] P. Guo, Y. Mei, J. Zhou, S. Jiang, and V. M. Patel, "Reconformer: Accelerated mri reconstruction using recurrent transformer," arXiv preprint arXiv:2201.09376, 2022.
[49] A. Güngör, S. U. Dar, Ş. Öztürk, Y. Korkmaz, G. Elmas, M. Özbey, and T. Çukur, "Adaptive diffusion priors for accelerated mri reconstruction," arXiv:2207.05876, 2022.
[50] C. Peng, P. Guo, S. K. Zhou, V. M. Patel, and R. Chellappa, "Towards performant and reliable undersampled mr reconstruction via diffusion model sampling," in International Conference on Medical Image Computing and Computer-Assisted Intervention. Springer, 2022, pp. 623-633.
[51] K. Wang, J. I. Tamir, A. De Goyeneche, U. Wollner, R. Brada, S. X. Yu, and M. Lustig, "High fidelity deep learning-based mri reconstruction with instance-wise discriminative feature matching loss," Magnetic Resonance in Medicine, vol. 88, no. 1, pp. 476-491, 2022.
[52] F. Gadjimuradov, T. Benkert, M. D. Nickel, and A. Maier, "Robust partial fourier reconstruction for diffusion-weighted imaging using a recurrent convolutional neural network," Magnetic Resonance in Medicine, vol. 87, no. 4, pp. 2018-2033, 2022.
[53] Z. Ramzi, C. G R, J.-L. Starck, and P. Ciuciu, "Nc-pdnet: A density-compensated unrolled network for 2d and 3d non-cartesian mri reconstruction," IEEE Transactions on Medical Imaging, vol. 41, no. 7, pp. 1625-1638, 2022.
[54] D. Polak, S. Cauley, B. Bilgic, E. Gong, P. Bachert, E. Adalsteinsson, and K. Setsompop, "Joint multi-contrast variational network reconstruction (jVN) with application to rapid 2D and 3D imaging," Magnetic Resonance in Medicine, vol. 84, no. 3, pp. 1456-1469, 2020.
[55] J. Y. Cheng, M. Mardani, M. T. Alley, J. M. Pauly, and S. S. Vasanawala, "Deepspirit: Generalized parallel imaging using deep convolutional neural networks," in Proceedings of the 26th Annual Meeting of the ISMRM, 2018, p. 0570.
[56] K. Pooja, Z. Ramzi, G. Chaithya, and P. Ciuciu, "Mc-pdnet: Deep unrolled neural network for multi-contrast mr image reconstruction from undersampled k-space data," in 2022 IEEE 19th International Symposium on Biomedical Imaging (ISBI), 2022, pp. 1-5.
[57] N. Tavaf, A. Torfi, K. Ugurbil, and P.-F. Van de Moortele, "Grappa-gans for parallel mri reconstruction," arXiv preprint arXiv:2101.03135, 2021.
[58] C. M. Sandino, P. Lai, S. S. Vasanawala, and J. Y. Cheng, "Accelerating cardiac cine mri using a deep learning-based espirit reconstruction," Magnetic Resonance in Medicine, vol. 85, no. 1, pp. 152-167, 2021.
[59] F. Knoll, K. Hammernik, E. Kobler, T. Pock, M. P. Recht, and D. K. Sodickson, "Assessment of the generalization of learned image reconstruction and the potential for transfer learning," Magnetic Resonance in Medicine, vol. 81, no. 1, pp. 116-128, 2019.
[60] A. S. Chaudhari, C. M. Sandino, E. K. Cole, D. B. Larson, G. E. Gold, S. S. Vasanawala, M. P. Lungren, B. A. Hargreaves, and C. P. Langlotz, "Prospective deployment of deep learning in mri: A framework for important considerations, challenges, and recommendations for best practices," Journal of Magnetic Resonance Imaging, vol. 54, no. 2, pp. 357-371, 2021.
[61] S. U. H. Dar, M. Yurt, and T. Çukur, "A few-shot learning approach for accelerated mri via fusion of data-driven and subject-driven priors," in Proceedings of the 29th Annual Meeting of the ISMRM, 2021, p. 1949.
[62] J. I. Tamir, S. X. Yu, and M. Lustig, "Unsupervised deep basis pursuit: Learning reconstruction without ground-truth data," in Proceedings of the 27th Annual Meeting of the ISMRM, 2019, p. 0660.
[63] E. K. Cole, F. Ong, S. S. Vasanawala, and J. M. Pauly, "Fast unsupervised mri reconstruction without fully-sampled ground truth data using generative adversarial networks," in Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV) Workshops, October 2021, pp. 3988-3997.
[64] B. Yaman, S. A. H. Hosseini, S. Moeller, J. Ellermann, K. Uğurbil, and M. Akçakaya, "Self-supervised learning of physics-guided reconstruction neural networks without fully sampled reference data," Magnetic Resonance in Medicine, vol. 84, no. 6, pp. 3172-3191, 2020.
[65] J. Liu, Y. Sun, C. Eldeniz, W. Gan, H. An, and U. S. Kamilov, "RARE: Image reconstruction using deep priors learned without groundtruth," IEEE Journal of Selected Topics in Signal Processing, vol. 14, no. 6, pp. 1088-1099, 2020.
[66] S. Wang, R. Wu, C. Li, J. Zou, Z. Zhang, Q. Liu, Y. Xi, and H. Zheng, "PARCEL: Physics-based Unsupervised Contrastive Representation Learning for Multi-coil MR Imaging," arXiv:2202.01494, 2022.
[67] S. A. H. Hosseini, C. Zhang, K. Uğurbil, S. Moeller, and M. Akçakaya, "sraki-rnn: accelerated mri with scan-specific recurrent neural networks using densely connected blocks," in Wavelets and Sparsity XVIII, vol. 11138. International Society for Optics and Photonics, 2019, p. 111381B.
[68] Q. Huang, Y. Xian, D. Yang, H. Qu, J. Yi, P. Wu, and D. N. Metaxas, "Dynamic mri reconstruction with end-to-end motion-guided network," Medical Image Analysis, vol. 68, p. 101901, 2021.
[69] J. Huang, S. Wang, G. Zhou, W. Hu, and G. Yu, "Evaluation on the generalization of a learned convolutional neural network for mri reconstruction," Magnetic Resonance Imaging, vol. 87, pp. 38-46, 2022.
[70] D. Ulyanov, A. Vedaldi, and V. Lempitsky, "Deep image prior," in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018, pp. 9446-9454.
[71] M. Uecker, P. Lai, M. J. Murphy, P. Virtue, M. Elad, J. M. Pauly, S. S. Vasanawala, and M. Lustig, "ESPIRiT-an eigenvalue approach to autocalibrating parallel MRI: Where SENSE meets GRAPPA," Magnetic Resonance in Medicine, vol. 71, no. 3, pp. 990-1001, 2014.
[72] F. Knoll, J. Zbontar, A. Sriram, M. J. Muckley, M. Bruno, A. Defazio, M. Parente, K. J. Geras, J. Katsnelson, H. Chandarana, Z. Zhang, M. Drozdzalv, A. Romero, M. Rabbat, P. Vincent, J. Pinkerton, D. Wang, N. Yakubova, E. Owens, C. L. Zitnick, M. P. Recht, D. K. Sodickson, and Y. W. Lui, "fastMRI: A publicly available raw k-space and DICOM dataset of knee images for accelerated MR image reconstruction using machine learning," Radiology: Artificial Intelligence, vol. 2, no. 1, p. e190007, 2020.
[73] T. Zhang, J. M. Pauly, S. S. Vasanawala, and M. Lustig, "Coil compression for accelerated imaging with Cartesian sampling," Magnetic Resonance in Medicine, vol. 69, no. 2, pp. 571-82, 2013.
[74] D. P. Kingma and J. L. Ba, "Adam: a Method for Stochastic Optimization," in International Conference on Learning Representations, 2015.
[75] O. Dalmaz, U. Mirza, G. Elmas, M. Özbey, S. U. Dar, E. Ceyani, S. Avestimehr, and T. Çukur, "One model to unite them all: Personalized federated learning of multi-contrast mri synthesis," arXiv:2207.06509, 2022.
[76] S. A. H. Hosseini, B. Yaman, S. Moeller, and M. Akçakaya, "High-fidelity accelerated mri reconstruction by scan-specific fine-tuning of physics-based neural networks," in 2020 42nd Annual International Conference of the IEEE Engineering in Medicine Biology Society (EMBC), 2020, pp. 1481-1484.
[77] K. Lei, M. Mardani, J. M. Pauly, and S. S. Vasanawala, "Wasserstein GANs for MR imaging: from paired to unpaired training," IEEE Transactions on Medical Imaging, vol. 40, no. 1, pp. 105-115, 2021.
[78] C. Zhang, S. A. H. Hosseini, S. Moeller, S. Weingärtner, K. Ugurbil, and M. Akcakaya, "Scan-specific residual convolutional neural networks for fast mri using residual raki," in 2019 53rd Asilomar Conference on Signals, Systems, and Computers, 2019, pp. 1476-1480.
[79] M. Murphy, M. Alley, J. Demmel, K. Keutzer, S. Vasanawala, and M. Lustig, "Fast l1-SPIRiT compressed sensing parallel imaging MRI: scalable parallel implementation and clinically feasible runtime," IEEE Transactions on Medical Imaging, vol. 31, no. 6, pp. 1250-1262, 2012.
[80] M. Özbey, O. Dalmaz, S. U. Dar, H. A. Bedel, Ş. Öztürk, A. Güngör, and T. Çukur, "Unsupervised medical image translation with adversarial diffusion models," arXiv:2207.08208, 2022.
[81] O. Dalmaz, M. Yurt, and T. Çukur, "ResViT: Residual vision transformers for multi-modal medical image synthesis," IEEE Transactions on Medical Imaging, vol. 41, no. 7, pp. 2598-2614, 2022.
[82] H. Zhou, J. Hou, Y. Zhang, J. Ma, and H. Ling, "Unified gradient- and intensity-discriminator generative adversarial network for image fusion," Information Fusion, vol. 88, pp. 184-201, 2022.
[83] J. Liu, R. Dian, S. Li, and H. Liu, "Sgfusion: A saliency guided deep-learning framework for pixel-level image fusion," Information Fusion, vol. 91, pp. 205-214, 2023.
+
39E0T4oBgHgl3EQfvAGt/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
4NE0T4oBgHgl3EQfeQCD/content/tmp_files/2301.02388v1.pdf.txt ADDED
@@ -0,0 +1,717 @@
1
+ Generating corneal panoramic images from contact specular microscope images
2
+ Yusuke Nagira1†, Yuzuha Hara2, Satoru Hiwa2
3
+ Naoki Okumura3, Noriko Koizumi3 and Tomoyuki Hiroyasu2
4
+ 1Graduate School of Life and Medical Sciences, Doshisha University, Japan
5
+ 2Department of Biomedical Sciences and Informatics, Doshisha University, Japan
6
+ 3Department of Biomedical Engineering, Faculty of Life and Medical Sciences, Doshisha University, Japan
7
+ (Tel: +81-774-65-6020, E-mail: tomo@is.doshisha.ac.jp)
8
+ Abstract: The contact specular microscope has a wider angle of view than that of the non-contact specular microscope but still
9
+ cannot capture an image of the entire cornea. To obtain such an image, it is necessary to prepare film on the parts of the image
10
+ captured sequentially and combine them to create a complete image. This study proposes a framework to automatically generate
11
+ an entire corneal image from videos captured using a contact specular microscope. Relatively focused images were extracted
12
+ from the videos and panoramic compositing was performed. If an entire image can be generated, it is possible to detect guttae
13
+ from the image and examine the extent of their presence. The system was implemented and the effectiveness of the proposed
14
+ framework was examined. The system was implemented using custom-made composite software, Image Composite Software
15
+ (ICS, K.I. Technology Co., Ltd., Japan, internal algorithms not disclosed), and a supervised learning model using U-Net was
16
+ used for guttae detection. Several images were correctly synthesized when the constructed system was applied to 94 different
17
+ corneal videos obtained from Fuchs endothelial corneal dystrophy (FECD) mouse model. The implementation and application
18
+ of the method to the data in this study confirmed its effectiveness. Owing to the minimal quantitative evaluation performed, such
19
+ as accuracy with implementation, it may pose some limitations for future investigations.
20
+ Keywords: U-Net, Semantic Segmentation, Fuchs Endothelial Corneal Dystrophy, Corneal Endothelial Cell
21
+ 1. INTRODUCTION
22
+ Fuchs endothelial corneal dystrophy (FECD) is a bilat-
23
+ eral disease, wherein corneal endothelial cells are unable to
24
+ maintain their hexagonal shape. It is characterized by the
25
+ accelerated loss of corneal endothelial cells with changes in
26
+ Descemet’s membrane, resulting in the formation of an ex-
27
+ tracellular matrix called guttae [1]. In the United States, it
28
+ is estimated that 4% of people over 40 years of age are af-
29
+ fected by the disease, occurring more commonly in women
30
+ and more frequently in people in their 40s and 50s. Corneal
31
+ endothelial pump function is lost as the disease progresses,
32
+ causing corneal edema [2]. Presently, corneal transplanta-
33
+ tion is the only reliable treatment, and FECD accounts for
34
+ 39% of all corneal transplants performed, making it the most
35
+ common cause of corneal transplantation worldwide [3].
36
+ Rho kinase inhibitors have been reported to promote cell
37
+ proliferation and adhesion to substrates, inhibit corneal en-
38
+ dothelial cell apoptosis, and promote wound healing. There-
39
+ fore, using Rho kinase inhibitor eye drops is a potential
40
+ novel treatment approach alternative to corneal transplanta-
41
+ tion [4][5].
42
+ In drug discovery research for FECD, the state of the
43
+ corneal endothelium, such as the guttae, is observed before
44
+ and after the drug use and evaluated based on the increase
45
+ or decrease in the number of cells. Doctors and researchers
46
+ widely use specular microscopes to observe the state of the
47
+ corneal endothelium.
48
+ However, the range of the micro-
49
+ scope’s imaging capability is limited and the current practice
50
+ estimates the state of the entire cornea from its center. The
51
+ endothelial cell density (ECD) is essential for understanding
52
+ † Yusuke Nagira is the presenter of this paper.
53
+ the pathogenesis of FECD. However, ECD cannot be mea-
54
+ sured accurately owing to the presence of guttae; hence, the
55
+ number of cells is measured manually [6].
56
+ Using a mouse model to demonstrate the pathogenesis of
57
+ FECD, studies have been conducted on the segmentation of
58
+ guttae using U-Net and the calculation of cell density in areas
59
+ excluding guttae [7][8]. In previous studies on the panoramic
60
+ synthesis of the corneal endothelium, focused images were
61
+ extracted manually and stitched together. Panoramic com-
62
+ positing of the entire cornea is yet to be performed because
63
+ the images were localized panoramic images and did not rep-
64
+ resent the entire cornea [9]. Conventional panorama synthe-
65
+ sis software such as AutoStitch [10] does not consider the
66
+ order of the input images used for synthesis. Instead, it ex-
67
+ tracts the image features using Scale-Invariant Feature Trans-
68
+ form (SIFT) [11] and stitches the matching features together.
69
+ When pasting an image, it is deformed and scaled. For im-
70
+ ages with similar characteristics, such as corneal endothelial
71
+ cells, the position of the image to be pasted may be incor-
72
+ rect or the shape of the cells may be deformed owing to the
73
+ deformation of the image. In this case, accurate values can-
74
+ not be obtained when calculating the area of the cells or the
75
+ percentage of guttae. Therefore, the original image needed
76
+ to have as minimal deformations as possible in the combined
77
+ image. Alternatively, it is necessary to provide a mechanism
78
+ to access the original version of the image of interest.
79
+ This study proposes a framework for a system that gen-
80
+ erates images of the entire corneal endothelium from videos
81
+ obtained using a contact specular microscope. In the pro-
82
+ posed framework, the focused images are extracted from the
83
+ video images, feature extraction is performed, and the im-
84
+ ages are synthesized. During synthesis, the system reduces
85
+ arXiv:2301.02388v1 [eess.IV] 6 Jan 2023
86
+
87
+ or deforms the original image to the minimum and adds a
88
+ feature that allows access to the original image of the area
89
+ of interest. This study examined the proposed framework by
90
+ building a system using two implementation methods. Fur-
91
+ ther, we added a function for automatically detecting guttae
92
+ using U-Net, a type of Deep Learning. This study presents a
93
+ proposed framework and an example of its implementation.
94
+ Quantitative evaluation of whole corneal endothelial images
95
+ and gut detection is insufficient, which poses a limitation that
96
+ must be addressed in future studies.
97
+ 2. FRAMEWORK OF CORNEAL PANORAMIC
98
+ IMAGE GENERATION FROM CONTACT
99
+ SPECULAR MICROSCOPE IMAGES
100
+ 2.1. Overview of the proposed framework
101
+ Fig.1 presents an overview of the proposed framework.
102
+ An image of the entire cornea was generated from a video
103
+ frame of the cornea captured using a contact-type specu-
104
+ lar microscope. First, a focused still image was extracted
105
+ from the target video image (dataset 1). Second, the features
106
+ of still images were extracted. Third, the matching feature
107
+ points were combined to create a panoramic image. Next,
108
+ the panoramic image was divided into grid regions and the
109
+ most focused image was selected from each region. Guttae
110
+ were detected in the combined panoramic images using deep
111
+ learning. Additionally, still images of the corneal endothe-
112
+ lium containing the guttae and mask images exhibiting the
113
+ location of the guttae were used for the model generation
114
+ (dataset 2).
115
+ 2.2. Extraction of in-focus images
116
+ A group of still images was extracted from the captured
117
+ videos. The entire corneal endothelium was captured and
118
+ converted to a single frame-by-frame image. If there were
119
+ N frames in the video, N images were the output in total
120
+ that were then divided into five groups in chronological order.
121
+ The image with the highest in-focus evaluation index was
122
+ selected from each group.
123
+ 2.3. Creating panoramic images
124
+ The algorithm for generating a single panoramic image is
125
+ accomplished through the following steps. First, characteris-
126
+ tic points in the images were extracted. Subsequently, a curve
127
+ connecting the characteristic points was obtained. Here, the
128
+ optimal curve connecting the extracted characteristic points
129
+ was obtained. This curve was then used to enlarge or interpo-
130
+ late the image. A panoramic image was generated. In the fol-
131
+ lowing experiments, two algorithms were prepared and the
132
+ results were compared.
133
+ 2.4. Image sharpening process
134
+ The synthesized panoramic images were mostly over-
135
+ lapped images. Additionally, the synthesized image is often
136
+ blurred because of the transparency and brightness of the im-
137
+ age change. Therefore, a method was developed to obtain
138
+ clearer images. The implementation of the sharpening pro-
139
+ cess is described in the next section.
140
+ 2.5. Creating the Guttae Classifier by U-Net
141
+ The U-Net is a neural network commonly used for image
142
+ segmentation. U-Net uses a convolutional neural network to
143
+ encode an input image as a feature map, subsequently decod-
144
+ ing that feature map to separate specific objects in the input
145
+ image. It is possible to prepare a dataset of corneal images
146
+ and train U-Net using this dataset to generate a model for
147
+ extracting the guttae.
148
+ 3. SYSTEM IMPLEMENTATION AND DATA
149
+ APPLICATION
150
+ 3.1. Outline
151
+ This study implemented a system to confirm the effec-
152
+ tiveness of the proposed framework.
153
+ Corneal videos ob-
154
+ tained from the FECD mouse model were processed to obtain
155
+ panoramic images of the cornea. Tenengrad was used to ob-
156
+ tain the in-focus images. Finally, two different applications
157
+ were used to generate panoramic images.
158
+ 3.2. Mouse Model of FECD
159
+ This study used images of whole corneal endothelial cells
160
+ from the FECD pathology mouse model. A single nucleotide
161
+ mutation in COL8A2 generated these genes, and it has been
162
+ reported that guttae increase over time. The Tissue Engineer-
163
+ ing Laboratory, Graduate School of Biomedical Sciences,
164
+ Doshisha University, provided the images.
165
+ The images
166
+ were taken using a prototype KSSP slit-scanning wide-field
167
+ contact specular microscope (Konan Medical, Inc., Nishi-
168
+ nomiya, Japan), had a resolution of 1620 × 1080 [pixels] at
169
+ a frame rate of 29.9 [fps] in MOV file format. The images
170
+ were taken in the following order: 1) starting from the center
171
+ of the cornea; 2) moving to the top of the cornea; 3) mov-
172
+ ing to the left; 4) filming from the top to the bottom of the
173
+ cornea; 5) moving around the right side; 6) filming from the
174
+ bottom to the center of the cornea. In this study, 94 videos
175
+ were prepared and used.
176
+ 3.3. Extraction of in-focus images by Tenengrad
177
+ The focus evaluation index was calculated as follows:
178
+ Tenengrad [12][13] value, which is the gradient of the im-
179
+ age based on the pixel value, is calculated for each region,
180
+ where Gx and Gy are the convolved values of the Sobel op-
181
+ erator of the pixel values in the x-direction and y-direction,
182
+ respectively.
183
+ Φx,y =
184
+
185
+ (i,j)∈Ω(x,y)
186
+ (Gx(i, j)2 + Gy(i, j)2)
187
+ (1)
188
+ The Sobel operator is expressed as
189
+ Kx =
190
+
191
+
192
+ −1
193
+ 0
194
+ 1
195
+ −2
196
+ 0
197
+ 2
198
+ −1
199
+ 0
200
+ 1
201
+
202
+ � , Ky =
203
+
204
+
205
+ −1
206
+ −2
207
+ −1
208
+ 0
209
+ 0
210
+ 0
211
+ 1
212
+ 2
213
+ 1
214
+
215
+
216
+ The highest value in the quadratic area of a single image
217
+ was considered the focal value for that image. When this
218
+ value was calculated for the entire image, the gradient was
219
+ smaller in the area containing the edge of the corneal en-
220
+ dothelium. In contrast, the gradient increased in the area
221
+ containing corneal endothelial cells. Thus, as the area of
222
+
223
+ Image integration for
224
+ panoramic image of the entire cornea
225
+ Learning Phase
226
+
227
+ U-Net
228
+ Corneal endothelial
229
+ images including Guttae
230
+ Mask image annotated
231
+ with Guttae locations
232
+ Dataset 2
233
+ Prediction Phase
234
+ Modeled U-Net
235
+ Video of the entire
236
+ cornea
237
+ Dataset 1
238
+ • Pre-processing
239
+ • Focused image
240
+ extraction
241
+ • Feature extraction
242
+ • Image integration
243
+ • Sharpen process of
244
+ panorama images
245
+ Final panoramic image
246
+ Guttae prediction
247
+ Fig. 1. Overview of the proposed framework
248
+ the rim increases, the Tenengrad value for the entire image
249
+ becomes smaller. This procedure prevents the corneal en-
250
+ dothelium from being excluded from the image even if it is
251
+ appropriately captured.
252
+ 3.4. Creating the idealized panoramic artificial CECs
253
+ image data
254
+ To confirm the effectiveness of the panorama synthesis
255
+ software, a set of idealized panoramic artificial corneal en-
256
+ dothelial cell images were created with no blurring or focus
257
+ mismatch on the extracted images. These images were ob-
258
+ tained using the GNU Image Manipulation Program (GIMP).
259
+ These artificial images were created based on the synthesis
260
+ results obtained using the panorama synthesis software de-
261
+ scribed below. A layer was added to the composite image,
262
+ the cell membrane of the corneal endothelium was traced,
263
+ and the areas considered to be guttae were painted black. The
264
+ color of the surrounding endothelial cells was extracted from
265
+ the layer depicting the cell membrane and guttae using the
266
+ color picker function. The layers are filled with the same
267
+ color. This process was applied to the entire cornea to create
268
+ artificial images. For areas where the cell membrane was not
269
+ visible owing to issues such as focus mismatch or blurring,
270
+ the cell membrane was depicted by referring to another im-
271
+ age in which the cell membrane could be observed appropri-
272
+ ately. Idealized panoramic artificial corneal endothelial cell
273
+ images were created that mimicked the distribution and size
274
+ of the guttae, as well as the size and shape of the corneal en-
275
+ dothelial cells. The created image was 1870 × 1080 [pixel]
276
+ in size and was cropped and stored by moving approximately
277
+ 10 [pixels] from the center to the top, counterclockwise from
278
+ the top, and counterclockwise from top to bottom, mimick-
279
+ ing the movement of a motion camera.
280
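A minimal sketch of how such shifted crops could be generated programmatically is given below (our own illustration; the crop size and the trajectory of top-left positions are left to the caller, since the paper describes this step only at the level above):

import numpy as np

def simulate_camera_crops(image, trajectory, crop_size):
    # Cut successive crops out of the artificial panoramic image along a trajectory of
    # (x, y) top-left positions spaced roughly 10 pixels apart, mimicking the camera path.
    crop_h, crop_w = crop_size
    h, w = image.shape[:2]
    crops = []
    for x, y in trajectory:
        x = int(np.clip(x, 0, w - crop_w))
        y = int(np.clip(y, 0, h - crop_h))
        crops.append(image[y:y + crop_h, x:x + crop_w].copy())
    return crops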
3.5. Creating panoramic images

Two applications were used in the compositing process: the Image Composite Software (ICS) and a panorama synthesis algorithm implemented in OpenCV. The two are explained below.
3.5.1. Image Composite Software (ICS)

Image Composite Software (K.I. Technology Co., Ltd., Yokohama, Kanagawa, Japan) is custom-made panorama compositing software whose specifications are tailored to compositing corneal endothelial images, and it was used for the compositing process. Because it is a commercial application, its internal details cannot be disclosed for copyright reasons. The original images are neither reduced nor enlarged when they are superimposed. Additionally, an API for accessing the original images is provided, allowing quick access to the original image of any area of interest. A flowchart of the panorama compositing process is shown in Algorithm 1: the first image is used as the reference image, and subsequent images are searched, in order of image number, for regions that match the reference. When a match is found, the image is stitched onto the reference and the merged image then serves as the reference for the subsequent images; if no match is found, the next image is tried, and the merging is terminated when ten consecutive images fail to match the reference image.
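As a rough illustration of the loop structure in Algorithm 1 below (our own paraphrase; the ICS matching and stitching routines themselves are not public), the process can be written as:

def compose_panorama(images, find_match, stitch, max_skip=10):
    # find_match(reference, candidate) and stitch(reference, candidate) stand in for
    # the undisclosed ICS matching and blending steps.
    if not images:
        return None
    reference = images[0]
    i, skipped = 0, 0
    while i + skipped + 1 < len(images):
        candidate = images[i + skipped + 1]
        if find_match(reference, candidate):
            reference = stitch(reference, candidate)  # the merged image becomes the new reference
            i, skipped = i + 1, 0
        else:
            skipped += 1
            if skipped > max_skip:
                break  # too many consecutive failures terminate the merging
    return reference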
3.5.2. Panorama synthesis algorithm implemented in OpenCV

Because the details of the ICS process are not publicly available, we implemented an algorithm that mimics it, shown in Algorithm 2, using OpenCV, an open-source computer vision library. The images are numbered in chronological order, so the Nth image is the one captured closest to the end of the shooting. Image j and image i + 1 (with j initially equal to i) are matched using SIFT features; if the two images share many similar features, the degree of change in coordinates between them is calculated and appended to a list, and i is advanced to the next image.
Algorithm 1 Image Composite Software processing details
 1: i = 0, j = 0
 2: while i + j + 1 ≤ N do
 3:     A = Images[i]
 4:     j = 0
 5:     while j ≤ 10 do
 6:         B = Images[i + j + 1]
 7:         if a matching area with A is found then
 8:             A = stitch B onto A
 9:             i = i + 1, j = 0
10:         else
11:             j = j + 1
Algorithm 2 Calculate the difference in coordinates between images
 1: i = 0, j = 0
 2: coord = [ ], usedImages = [ ]
 3: while j ≤ N do
 4:     j = i
 5:     error = 0
 6:     flag = True
 7:     while flag do
 8:         extract SIFT features of Image[j] and Image[i+1]
 9:         if the two images could be feature matched then
10:             C = calc_coord_diff(Image[j], Image[i+1])
11:             coord.append(C)
12:             usedImages.extend([Image[j], Image[i+1]])
13:             i += 1
14:             flag = False
15:         else
16:             error += 1
17:             if error ≥ 10 then
18:                 j += 1
19:             else
20:                 i += 1
21: usedImages = list(set(usedImages))
22: return coord, usedImages
If the two images share too few feature points to be matched, the error counter is incremented and the SIFT feature extraction is performed again: while fewer than ten failures have accumulated, i is advanced so that the next image is tried, and once the matching has failed ten times, j is advanced and the process is repeated with j set equal to i. Using the above algorithm, the change in coordinates between the images used for composition can be calculated. The global coordinates of the entire composite are obtained by taking the cumulative sum of these coordinate differences up to each image and shifting them so that the smallest x and y coordinates become zero.
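A minimal sketch of the OpenCV-based matching and coordinate accumulation follows (our own illustration of the steps described above; the ratio-test threshold, the minimum number of matches, and the helper names are assumptions):

import cv2
import numpy as np

sift = cv2.SIFT_create()
matcher = cv2.BFMatcher()

def coord_diff(img_a, img_b, min_matches=10):
    # Estimate the (dx, dy) shift between two frames from matched SIFT keypoints;
    # returns None when too few good matches are found.
    kp_a, des_a = sift.detectAndCompute(img_a, None)
    kp_b, des_b = sift.detectAndCompute(img_b, None)
    if des_a is None or des_b is None:
        return None
    good = []
    for pair in matcher.knnMatch(des_a, des_b, k=2):
        if len(pair) == 2 and pair[0].distance < 0.75 * pair[1].distance:  # Lowe's ratio test
            good.append(pair[0])
    if len(good) < min_matches:
        return None
    shifts = [np.array(kp_a[m.queryIdx].pt) - np.array(kp_b[m.trainIdx].pt) for m in good]
    return np.median(shifts, axis=0)  # robust estimate of the inter-frame shift

def global_coords(diffs):
    # Cumulative sum of the per-pair shifts, offset so the smallest x and y become zero.
    coords = np.cumsum(np.vstack([[0.0, 0.0]] + list(diffs)), axis=0)
    return coords - coords.min(axis=0)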
3.6. Image sharpening process

The combined panoramic image was divided into 64 × 64 [pixel] grid regions. From the coordinates of the panoramic image, the numbers of the constituent images and their coordinates were obtained, together with the coordinates of the constituent images covering the upper-left corner of each grid region. Tenengrad values were calculated for the corresponding crops, and the crop with the highest Tenengrad value was pasted, at exactly the extracted coordinates, onto a newly created blank image of the same size as the panoramic image. This was performed for all regions; pasting the crop with the highest Tenengrad value among the multiple overlapping images yields a clearer image.
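A simplified sketch of this per-region selection is shown below (our own illustration; the mapping from panoramic coordinates back to the constituent images is abstracted into crops_covering, a hypothetical helper):

import numpy as np

def sharpen_panorama(pano_shape, crops_covering, region=64):
    # Rebuild the panorama by keeping, in each grid region, the most focused crop.
    # crops_covering(x, y, region) is assumed to return the candidate crops of that
    # region taken from the constituent images, each as a (region x region) array.
    h, w = pano_shape
    out = np.zeros((h, w), dtype=np.float64)
    for y in range(0, h - region + 1, region):
        for x in range(0, w - region + 1, region):
            candidates = crops_covering(x, y, region)
            if not candidates:
                continue
            best = max(candidates, key=focus_value)  # focus_value: Tenengrad maximum, see the Sec. 3.3 sketch
            out[y:y + region, x:x + region] = best
    return out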
3.7. Creating the Guttae Classifier

3.7.1. Creating the Dataset

A large amount of data is required to train such networks; however, because the amount of data available in this study was small, data augmentation was necessary. Additionally, the image data were large, so the images would normally have to be reduced in size before being used in a dataset. Because resizing causes a loss of information, we developed a new data augmentation method for a small number of large images. The corneal endothelial cell image and the mask image showing the guttae locations were divided into a grid of 64 × 64 [pixels], and the cropping grid was additionally shifted three times by 16 [pixels] to the right and three times by 16 [pixels] downward. Each grid region is thus expanded into 16 images (four horizontal × four vertical positions). Images that did not contain guttae were excluded from the dataset.
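A minimal sketch of this shifted-grid augmentation (our own illustration; the names and the nonzero-mask criterion are assumptions):

import numpy as np

def shifted_grid_crops(image, mask, size=64, shift=16, n_shifts=4):
    # Yield (image_patch, mask_patch) pairs from a 64 x 64 grid shifted by multiples
    # of 16 pixels; patches whose mask contains no guttae pixels are skipped.
    h, w = image.shape[:2]
    for dy in range(n_shifts):
        for dx in range(n_shifts):
            for y in range(dy * shift, h - size + 1, size):
                for x in range(dx * shift, w - size + 1, size):
                    m = mask[y:y + size, x:x + size]
                    if m.any():  # assumes guttae pixels are nonzero in the mask
                        yield image[y:y + size, x:x + size], m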
3.7.2. Training and Prediction with the CNN

Twenty-one corneal endothelial cell images, each with a mask image showing the locations of the guttae, were prepared (Fig.1, Dataset 2). Segmentation Models PyTorch, a Python library for implementing segmentation-specific CNNs, was used in the experiments. The best encoder backbone was determined by comparing ResNet18, ResNet34, ResNet50, VGG11, and VGG16: the twenty-one images were divided into eighteen images used to develop the model and three images used to select the backbone. ImageNet-pretrained weights were transferred to the model, and fine-tuning was performed. A U-Net [14] whose encoder was replaced with ResNet50 [15] was the best backbone. Adam [16] was used as the optimizer with an initial learning rate of 1e-4, and the squared error was used as the loss function. Predictions were made on the 64 × 64 [pixel] crops extracted during the sharpening process and pasted back to their original positions, yielding a prediction of the guttae locations for the panoramic image.
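A minimal training sketch with the segmentation_models_pytorch library under the settings reported above is given below (ResNet50 encoder, ImageNet weights, Adam at 1e-4, squared-error loss; the data loading, the single input channel, and the sigmoid output are our assumptions):

import torch
import segmentation_models_pytorch as smp

# U-Net with a ResNet50 encoder initialized from ImageNet weights; one output channel for the guttae mask.
model = smp.Unet(encoder_name="resnet50", encoder_weights="imagenet",
                 in_channels=1, classes=1, activation="sigmoid")
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
criterion = torch.nn.MSELoss()  # squared-error loss between predicted and annotated masks

def train_epoch(loader, device="cuda"):
    model.to(device).train()
    for patches, masks in loader:  # 64 x 64 patches and their guttae masks
        patches, masks = patches.to(device), masks.to(device)
        optimizer.zero_grad()
        loss = criterion(model(patches), masks)
        loss.backward()
        optimizer.step()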
4. RESULTS AND DISCUSSION

4.1. Comparison of the algorithms implemented in ICS and OpenCV

An example for mouse corneal endothelial cell images is shown in Fig.2A. The images synthesized using ICS were more circular than those synthesized using OpenCV, indicating that the synthesis was performed correctly. In contrast, the images synthesized with the OpenCV algorithm were not circular and were often pasted in incorrect locations.

An example of the synthesis result of the OpenCV-based algorithm on the artificial cornea data is shown in Fig.2B.
Fig. 2. A: Comparison of results with Image Composite Software and the software implemented in OpenCV ((a) panoramic images by Image Composite Software, (b) panoramic images by OpenCV). B: Results of the OpenCV-implemented software on artificial cornea data ((a) truth (artificial cornea image), (b) panoramic image by OpenCV); (a) shows the composite result and (b) shows the overlap of the component images.
This is the synthesis result when the focus is perfectly aligned and no images are blurred. The result is almost identical to the ground-truth data, with no unnatural overlap between the composited images, indicating that compositing was performed correctly. Thus, if the in-focus images are extracted well, they can also be integrated well using OpenCV; overall, however, ICS is the more robust method.
4.2. Synthesis results with ICS

As previously mentioned, when recording videos of the mouse cornea with the contact specular microscope, the camera moves upward from the center of the cornea, then leftward along the corneal edge, and downward once it has gone around the edge; the cornea is then photographed clockwise and finally back down to the center. For the 94 videos, the left- and right-rotated portions were split, and an integrated image was created for each using ICS. The results are presented in Table 1. An image was classified as "image dropout" if the center of the image was missing, "distorted shape" if the shape was distorted instead of circular, and "unnatural paste" if parts were pasted in unnatural locations. Conversely, an image in which the cornea in the combined panoramic image remained circular, the center was not missing, and there were no unnaturally pasted parts was classified as a "success". In 75 cases, either the right- or left-rotated part was correctly merged. Fig.3 compares the results of manual compositing and compositing with ICS. Among the videos that failed to be synthesized using ICS, those with distorted shapes or unnatural pasting were verified to contain frames that deviated from the cornea because of contamination on the specular microscope or camera shake. The stains themselves are considered to have become feature points, so that pasting of the composited part failed; in addition, image positions shifted considerably because of strong blurring, resulting in unnatural pasting positions and a distorted overall image shape.
Table 1. Synthesis results with ICS

Result             Left   Right
image dropout        15      20
distorted shape       6      12
unnatural paste      10       6
success              63      56
The ICS results and the manually composited images were almost identical, suggesting that the image-compositing algorithm was not at fault and that, in the failed cases, part of the mouse cornea had simply not been captured with the specular microscope in the first place. The video was taken from the center of the mouse cornea moving upward, then rotating leftward along the corneal edge, and finally moving downward once it had gone around the edge; if an area near the center of the cornea is never photographed during this sweep, that area will be missing from the image.
4.3. Segmentation of guttae in panoramic images

The model that detects the guttae locations was applied to the panoramic images of the videos (Fig.1, Dataset 1). Fig.4 shows two prediction examples of the guttae positions in panoramic images: (c) and (d) are the prediction results for (a) and (b), respectively. The original panoramic images were combined using ICS, and the edges of the corneas were cropped after sharpening.

Segmentation was performed on the images synthesized from the entire cornea using ICS. Currently, the 21 still images and their guttae-annotated mask images are divided into training and validation datasets for segmentation, and the model was evaluated by comparing the validation data with the predicted results. Fig.4 shows that, while segmentation of the likely guttae is successful, the model also partially predicts guttae at the edges of the cornea.
Fig. 3. Integrated image results by Image Composite Software and by manual compositing: (a) and (b) panoramic images by Image Composite Software, (c) and (d) panoramic images composited manually; in (a) and (c) the panoramic image was integrated successfully, while in (b) and (d) there is a hole in the center. The left side of each panel shows the composite result and the right side the overlap of the component images.

Fig. 4. Two examples of panoramic images and guttae locations: (a), (c) panoramic images with the corneal rim removed; (b), (d) prediction results for the guttae locations.
Further quantitative evaluation is essential but remains a limitation to be addressed in future research; for this purpose, it is necessary to prepare images to which the ground truth of the guttae has been assigned.
4.4. Discussion

The algorithm implemented in OpenCV could not correctly synthesize panoramas from the mouse corneal endothelial cell images: compared with the results obtained on the artificial cornea data, images were pasted in unnatural positions. The essential difference between the mouse corneal endothelial cell data and the artificial cornea data is that, for the artificial data, all images are in focus and none are blurred, whereas for the mouse data the in-focus extraction step selects images at equal intervals in chronological order and therefore sometimes extracts out-of-focus images. The ICS-based method synthesized the images more accurately than the OpenCV-based software because ICS uses a feature extraction method appropriate for corneal endothelial cells, whereas the OpenCV-based synthesis relies on SIFT for feature extraction. Overall, the synthesis is considered to be working well: until now it has not been possible to obtain images of the entire cornea because of the narrow imaging range of specular microscopy, and this study suggests that a composite image of the entire cornea can be obtained by extracting relatively focused images from a video of the entire cornea.
5. CONCLUSIONS AND FUTURE WORK

In current diagnosis and observation of corneal endothelial cells with specular microscopy, the status of the entire cornea is inferred from its center; if images of the entire cornea could be obtained, more detailed studies would become possible. In this study, we proposed a framework for generating images of the entire cornea from videos captured using contact specular microscopy. Focused images were extracted from the video and combined into a panoramic image, and a U-Net learning model was constructed to extract the guttae from the entire image. To study the effectiveness of the proposed framework, we implemented it and applied it to corneal data from an FECD mouse model. The panorama synthesis applications used in the implementation were our custom-built ICS and an algorithm built on OpenCV, an open-source library. The artificial corneal images were synthesized with no unnatural aspects in the results; however, some sets of extracted images were not synthesized correctly when they contained blurred images, while many images were synthesized correctly using ICS.

After the panorama was merged, the image was divided into a grid, and the most in-focus crops were extracted and pasted, resulting in a sharper image than the output previously obtained with ICS alone. Using the crops extracted within each region, the guttae locations could also be predicted. Although implementing the method and applying it to the data in this study confirmed its effectiveness, few quantitative evaluations have been performed; quantitative evaluation, such as the accuracy of the implementation, remains an issue for the future.
REFERENCES

[1] Allen O Eghrari, S Amer Riazuddin, and John D Gottsch. Fuchs corneal dystrophy. Progress in Molecular Biology and Translational Science, 134:79–97, 2015.

[2] Gargi Gouranga Nanda and Debasmita Pankaj Alone. Current understanding of the pathogenesis of Fuchs' endothelial corneal dystrophy. Molecular Vision, 25:295, 2019.

[3] Philippe Gain, Rémy Jullienne, Zhiguo He, Mansour Aldossary, Sophie Acquart, Fabrice Cognasse, and Gilles Thuret. Global survey of corneal transplantation and eye banking. JAMA Ophthalmology, 134(2):167–173, 2016.

[4] Naoki Okumura, Shigeru Kinoshita, and Noriko Koizumi. Application of Rho kinase inhibitors for the treatment of corneal endothelial diseases. Journal of Ophthalmology, 2017, 2017.

[5] Naoki Okumura, Shigeru Kinoshita, and Noriko Koizumi. The role of Rho kinase inhibitors in corneal endothelial dysfunction. Current Pharmaceutical Design, 23(4):660–666, 2017.

[6] Jay W McLaren, Lori A Bachman, Katrina M Kane, and Sanjay V Patel. Objective assessment of the corneal endothelium in Fuchs' endothelial dystrophy. Investigative Ophthalmology & Visual Science, 55(2):1184–1190, 2014.

[7] Naoki Okumura, Shohei Yamada, Takeru Nishikawa, Kaito Narimoto, Kengo Okamura, Ayaka Izumi, Satoru Hiwa, Tomoyuki Hiroyasu, and Noriko Koizumi. U-Net convolutional neural network for segmenting the corneal endothelium in a mouse model of Fuchs endothelial corneal dystrophy. Cornea, 2021.

[8] Takeru Nishikawa, Naoki Okumura, Kaito Narimoto, Shohei Yamada, Kengo Okamura, Ayaka Izumi, and Noriko Koizumi. Deep neural network for the analysis of guttae via semi-supervised learning in a Fuchs endothelial corneal dystrophy mouse model. Investigative Ophthalmology & Visual Science, 62(8):826–826, 2021.

[9] Hiroshi Tanaka, Naoki Okumura, Noriko Koizumi, Chie Sotozono, Yasuhiro Sumii, and Shigeru Kinoshita. Panoramic view of human corneal endothelial cell layer observed by a prototype slit-scanning wide-field contact specular microscope. British Journal of Ophthalmology, 101(5):655–659, 2017.

[10] Matthew Brown and David G Lowe. Automatic panoramic image stitching using invariant features. International Journal of Computer Vision, 74(1):59–73, 2007.

[11] David G Lowe. Distinctive image features from scale-invariant keypoints. International Journal of Computer Vision, 60(2):91–110, 2004.

[12] Eric Krotkov. Focusing. International Journal of Computer Vision, 1(3):223–237, 1988.

[13] Said Pertuz, Domenec Puig, and Miguel Angel Garcia. Analysis of focus measure operators for shape-from-focus. Pattern Recognition, 46(5):1415–1432, 2013.

[14] Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-Net: Convolutional networks for biomedical image segmentation. In International Conference on Medical Image Computing and Computer-Assisted Intervention, pages 234–241. Springer, 2015.

[15] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 770–778, 2016.

[16] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014.
4NE0T4oBgHgl3EQfeQCD/content/tmp_files/load_file.txt ADDED
@@ -0,0 +1,347 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf,len=346
2
+ page_content='Generating corneal panoramic images from contact specular microscope images Yusuke Nagira1†, Yuzuha Hara2, Satoru Hiwa2 Naoki Okumura3, Noriko Koizumi3 and Tomoyuki Hiroyasu2 1Graduate School of Life and Medical Sciences, Doshisha University, Japan 2Department of Biomedical Sciences and Informatics, Doshisha University, Japan 3Department of Biomedical Engineering, Faculty of Life and Medical Sciences, Doshisha University, Japan (Tel: +81-774-65-6020, E-mail: tomo@is.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
3
+ page_content='doshisha.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
4
+ page_content='ac.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
5
+ page_content='jp) Abstract: The contact specular microscope has a wider angle of view than that of the non-contact specular microscope but still cannot capture an image of the entire cornea.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
6
+ page_content=' To obtain such an image, it is necessary to prepare film on the parts of the image captured sequentially and combine them to create a complete image.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
7
+ page_content=' This study proposes a framework to automatically generate an entire corneal image from videos captured using a contact specular microscope.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
8
+ page_content=' Relatively focused images were extracted from the videos and panoramic compositing was performed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
9
+ page_content=' If an entire image can be generated, it is possible to detect guttae from the image and examine the extent of their presence.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
10
+ page_content=' The system was implemented and the effectiveness of the proposed framework was examined.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
11
+ page_content=' The system was implemented using custom-made composite software, Image Composite Software (ICS, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
12
+ page_content='I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
13
+ page_content=' Technology Co.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
14
+ page_content=', Ltd.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
15
+ page_content=', Japan, internal algorithms not disclosed), and a supervised learning model using U-Net was used for guttae detection.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
16
+ page_content=' Several images were correctly synthesized when the constructed system was applied to 94 different corneal videos obtained from Fuchs endothelial corneal dystrophy (FECD) mouse model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
17
+ page_content=' The implementation and application of the method to the data in this study confirmed its effectiveness.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
18
+ page_content=' Owing to the minimal quantitative evaluation performed, such as accuracy with implementation, it may pose some limitations for future investigations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
19
+ page_content=' Keywords: U-Net, Semantic Segmentation, Fuchs Endothelial Corneal Dystrophy, Corneal Endothelial Cell 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
20
+ page_content=' INTRODUCTION Fuchs endothelial corneal dystrophy (FECD) is a bilat- eral disease, wherein corneal endothelial cells are unable to maintain their hexagonal shape.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
21
+ page_content=' It is characterized by the accelerated loss of corneal endothelial cells with changes in Descemet’s membrane, resulting in the formation of an ex- tracellular matrix called guttae [1].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
22
+ page_content=' In the United States, it is estimated that 4% of people over 40 years of age are af- fected by the disease, occurring more commonly in women and more frequently in people in their 40s and 50s.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
23
+ page_content=' Corneal endothelial pump function is lost as the disease progresses, causing corneal edema [2].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
24
+ page_content=' Presently, corneal transplanta- tion is the only reliable treatment, and FECD accounts for 39% of all corneal transplants performed, making it the most common cause of corneal transplantation worldwide [3].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
25
+ page_content=' Rho kinase inhibitors have been reported to promote cell proliferation and adhesion to substrates, inhibit corneal en- dothelial cell apoptosis, and promote wound healing.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
26
+ page_content=' There- fore, using Rho kinase inhibitor eye drops is a potential novel treatment approach alternative to corneal transplanta- tion [4][5].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
27
+ page_content=' In drug discovery research for FECD, the state of the corneal endothelium, such as the guttae, is observed before and after the drug use and evaluated based on the increase or decrease in the number of cells.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
28
+ page_content=' Doctors and researchers widely use specular microscopes to observe the state of the corneal endothelium.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
29
+ page_content=' However, the range of the micro- scope’s imaging capability is limited and the current practice estimates the state of the entire cornea from its center.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
30
+ page_content=' The endothelial cell density (ECD) is essential for understanding † Yusuke Nagira is the presenter of this paper.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
31
+ page_content=' the pathogenesis of FECD.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
32
+ page_content=' However, ECD cannot be mea- sured accurately owing to the presence of guttae;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
33
+ page_content=' hence, the number of cells is measured manually [6].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
34
+ page_content=' Using a mouse model to demonstrate the pathogenesis of FECD, studies have been conducted on the segmentation of guttae using U-Net and the calculation of cell density in areas excluding guttae [7][8].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
35
+ page_content=' In previous studies on the panoramic synthesis of the corneal endothelium, focused images were extracted manually and stitched together.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
36
+ page_content=' Panoramic com- positing of the entire cornea is yet to be performed because the images were localized panoramic images and did not rep- resent the entire cornea [9].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
37
+ page_content=' Conventional panorama synthe- sis software such as AutoStitch [10] does not consider the order of the input images used for synthesis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
38
+ page_content=' Instead, it ex- tracts the image features using Scale-Invariant Feature Trans- form (SIFT) [11] and stitches the matching features together.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
39
+ page_content=' When pasting an image, it is deformed and scaled.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
40
+ page_content=' For im- ages with similar characteristics, such as corneal endothelial cells, the position of the image to be pasted may be incor- rect or the shape of the cells may be deformed owing to the deformation of the image.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
41
+ page_content=' In this case, accurate values can- not be obtained when calculating the area of the cells or the percentage of guttae.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
42
+ page_content=' Therefore, the original image needed to have as minimal deformations as possible in the combined image.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
43
+ page_content=' Alternatively, it is necessary to provide a mechanism to access the original version of the image of interest.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
44
+ page_content=' This study proposes a framework for a system that gen- erates images of the entire corneal endothelium from videos obtained using a contact specular microscope.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
45
+ page_content=' In the pro- posed framework, the focused images are extracted from the video images, feature extraction is performed, and the im- ages are synthesized.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
46
+ page_content=' During synthesis, the system reduces arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
47
+ page_content='02388v1 [eess.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
48
+ page_content='IV] 6 Jan 2023 or deforms the original image to the minimum and adds a feature that allows access to the original image of the area of interest.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
49
+ page_content=' This study examined the proposed framework by building a system using two implementation methods.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
50
+ page_content=' Fur- ther, we added a function for automatically detecting guttae using U-Net, a type of Deep Learning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
51
+ page_content=' This study presents a proposed framework and an example of its implementation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
52
+ page_content=' Quantitative evaluation of whole corneal endothelial images and gut detection is insufficient, which poses a limitation that must be addressed in future studies.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
53
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
54
+ page_content=' FRAMEWORK OF CORNEAL PANORAMIC IMAGE GENERATION FROM CONTACT SPECULAR MICROSCOPE IMAGES 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
55
+ page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
56
+ page_content=' Overview of the proposed framework Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
57
+ page_content='1 presents an overview of the proposed framework.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
58
+ page_content=' An image of the entire cornea was generated from a video frame of the cornea captured using a contact-type specu- lar microscope.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
59
+ page_content=' First, a focused still image was extracted from the target video image (dataset 1).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
60
+ page_content=' Second, the features of still images were extracted.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
61
+ page_content=' Third, the matching feature points were combined to create a panoramic image.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
62
+ page_content=' Next, the panoramic image was divided into grid regions and the most focused image was selected from each region.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
63
+ page_content=' Guttae were detected in the combined panoramic images using deep learning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
64
+ page_content=' Additionally, still images of the corneal endothe- lium containing the guttae and mask images exhibiting the location of the guttae were used for the model generation (dataset 2).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
65
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
66
+ page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
67
+ page_content=' Extraction of in-focus images A group of still images was extracted from the captured videos.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
68
+ page_content=' The entire corneal endothelium was captured and converted to a single frame-by-frame image.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
69
+ page_content=' If there were N frames in the video, N images were the output in total that were then divided into five groups in chronological order.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
70
+ page_content=' The image with the highest in-focus evaluation index was selected from each group.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
71
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
72
+ page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
73
+ page_content=' Creating panoramic images The algorithm for generating a single panoramic image is accomplished through the following steps.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
74
+ page_content=' First, characteris- tic points in the images were extracted.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
75
+ page_content=' Subsequently, a curve connecting the characteristic points was obtained.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
76
+ page_content=' Here, the optimal curve connecting the extracted characteristic points was obtained.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
77
+ page_content=' This curve was then used to enlarge or interpo- late the image.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
78
+ page_content=' A panoramic image was generated.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
79
+ page_content=' In the fol- lowing experiments, two algorithms were prepared and the results were compared.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
80
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
81
+ page_content='4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
82
+ page_content=' Image sharpening process The synthesized panoramic images were mostly over- lapped images.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
83
+ page_content=' Additionally, the synthesized image is often blurred because of the transparency and brightness of the im- age change.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
84
+ page_content=' Therefore, a method was developed to obtain clearer images.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
85
+ page_content=' The implementation of the sharpening pro- cess is described in the next section.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
86
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
87
+ page_content='5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
88
+ page_content=' Creating the Guttae Classifier by U-Net The U-Net is a neural network commonly used for image segmentation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
89
+ page_content=' U-Net uses a convolutional neural network to encode an input image as a feature map, subsequently decod- ing that feature map to separate specific objects in the input image.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
90
+ page_content=' It is possible to prepare a dataset of corneal images and train U-Net using this dataset to generate a model for extracting the guttae.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
91
+ page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
92
+ page_content=' SYSTEM IMPLEMENTATION AND DATA APPLICATION 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
93
+ page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
94
+ page_content=' Outline This study implemented a system to confirm the effec- tiveness of the proposed framework.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
95
+ page_content=' Corneal videos ob- tained from the FECD mouse model were processed to obtain panoramic images of the cornea.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
96
+ page_content=' Tenengrad was used to ob- tain the in-focus images.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
97
+ page_content=' Finally, two different applications were used to generate panoramic images.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
98
+ page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
99
+ page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
100
+ page_content=' Mouse Model of FECD This study used images of whole corneal endothelial cells from the FECD pathology mouse model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
101
+ page_content=' A single nucleotide mutation in COL8A2 generated these genes, and it has been reported that guttae increase over time.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
102
+ page_content=' The Tissue Engineer- ing Laboratory, Graduate School of Biomedical Sciences, Doshisha University, provided the images.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
103
+ page_content=' The images were taken using a prototype KSSP slit-scanning wide-field contact specular microscope (Konan Medical, Inc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
104
+ page_content=', Nishi- nomiya, Japan), had a resolution of 1620 × 1080 [pixels] at a frame rate of 29.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
105
+ page_content='9 [fps] in MOV file format.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
106
+ page_content=' The images were taken in the following order: 1) starting from the center of the cornea;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
107
+ page_content=' 2) moving to the top of the cornea;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
108
+ page_content=' 3) mov- ing to the left;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
109
+ page_content=' 4) filming from the top to the bottom of the cornea;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
110
+ page_content=' 5) moving around the right side;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
111
+ page_content=' 6) filming from the bottom to the center of the cornea.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
112
+ page_content=' In this study, 94 videos were prepared and used.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
113
+ page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
114
+ page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
115
+ page_content=' Extraction of in-focus images by Tenengrad The focus evaluation index was calculated as follows: Tenengrad [12][13] value, which is the gradient of the im- age based on the pixel value, is calculated for each region, where Gx and Gy are the convolved values of the Sobel op- erator of the pixel values in the x-direction and y-direction, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
116
+ page_content=' Φx,y = � (i,j)∈Ω(x,y) (Gx(i, j)2 + Gy(i, j)2) (1) The Sobel operator is expressed as Kx = � � −1 0 1 −2 0 2 −1 0 1 � � , Ky = � � −1 −2 −1 0 0 0 1 2 1 � � The highest value in the quadratic area of a single image was considered the focal value for that image.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
117
+ page_content=' When this value was calculated for the entire image, the gradient was smaller in the area containing the edge of the corneal en- dothelium.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
118
+ page_content=' In contrast, the gradient increased in the area containing corneal endothelial cells.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
119
+ page_content=' Thus, as the area of Image integration for panoramic image of the entire cornea Learning Phase … U-Net Corneal endothelial images including Guttae Mask image annotated with Guttae locations Dataset 2 Prediction Phase Modeled U-Net Video of the entire cornea Dataset 1 Pre-processing Focused image extraction Feature extraction Image integration Sharpen process of panorama images Final panoramic image Guttae prediction Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
120
+ page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
121
+ page_content=' Overview of the proposed framework the rim increases, the Tenengrad value for the entire image becomes smaller.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
122
+ page_content=' This procedure prevents the corneal en- dothelium from being excluded from the image even if it is appropriately captured.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
123
+ page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
124
+ page_content='4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
125
+ page_content=' Creating the idealized panoramic artificial CECs image data To confirm the effectiveness of the panorama synthesis software, a set of idealized panoramic artificial corneal en- dothelial cell images were created with no blurring or focus mismatch on the extracted images.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
126
+ page_content=' These images were ob- tained using the GNU Image Manipulation Program (GIMP).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
127
+ page_content=' These artificial images were created based on the synthesis results obtained using the panorama synthesis software de- scribed below.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
128
+ page_content=' A layer was added to the composite image, the cell membrane of the corneal endothelium was traced, and the areas considered to be guttae were painted black.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
129
+ page_content=' The color of the surrounding endothelial cells was extracted from the layer depicting the cell membrane and guttae using the color picker function.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
130
+ page_content=' The layers are filled with the same color.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
131
+ page_content=' This process was applied to the entire cornea to create artificial images.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
132
+ page_content=' For areas where the cell membrane was not visible owing to issues such as focus mismatch or blurring, the cell membrane was depicted by referring to another im- age in which the cell membrane could be observed appropri- ately.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
133
+ page_content=' Idealized panoramic artificial corneal endothelial cell images were created that mimicked the distribution and size of the guttae, as well as the size and shape of the corneal en- dothelial cells.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
134
+ page_content=' The created image was 1870 × 1080 [pixel] in size and was cropped and stored by moving approximately 10 [pixels] from the center to the top, counterclockwise from the top, and counterclockwise from top to bottom, mimick- ing the movement of a motion camera.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
135
+ page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
136
+ page_content='5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
137
+ page_content=' Creating panoramic images In the composite process, two types of applications were used;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
138
+ page_content=' the Image Composite Software (ICS) and the panorama synthesis algorithm implemented in OpenCV.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
139
+ page_content=' The algo- rithms are explained as follows.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4NE0T4oBgHgl3EQfeQCD/content/2301.02388v1.pdf'}
140
3.5.1. Image Composite Software (ICS)
Image Composite Software (K.I. Technology Co., Ltd., Yokohama, Kanagawa, Japan) is a custom-made panorama compositing application created with specifications suitable for compositing corneal endothelial images, and it was used for the composite process. Since this is a commercial application, we cannot explain the details of its contents for copyright reasons. The original image is neither reduced nor enlarged when the images are superimposed. Additionally, an API to access the original images is provided, allowing quick access to the original image of an area of interest. A flowchart of the panorama compositing process is shown in Algorithm 1: the first image is used as the reference image, and regions that match it are searched in order of image number. When a match is found, the image merged with the reference becomes the reference for the subsequent image, and the process is repeated. Merging is terminated when ten consecutive images fail to match the reference image.
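The following is a minimal Python sketch of the control flow described above and in Algorithm 1 (reproduced in Section 3.5.2); it is not the proprietary ICS implementation, and find_matching_area and stitch are hypothetical helpers.

def composite(images, find_matching_area, stitch, max_skip=10):
    # Merge frames onto a growing reference image, tolerating up to
    # `max_skip` consecutive frames that do not match.
    if not images:
        return None
    reference = images[0]
    i, j = 0, 0
    while i + j + 1 < len(images):
        candidate = images[i + j + 1]
        if find_matching_area(reference, candidate):
            reference = stitch(reference, candidate)  # merged result becomes the new reference
            i, j = i + 1, 0
        else:
            j += 1
            if j >= max_skip:
                break  # ten consecutive failures terminate the merge
    return reference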
3.5.2. Panorama synthesis algorithm implemented in OpenCV
Because the details of the ICS process are not publicly available, we implemented an algorithm that mimics the ICS process (Algorithm 2) using OpenCV, an open-source computer vision library. The Kth image is the one closest to the end of the shooting. The jth image and the (i+1)th image are matched on SIFT features, and if the two images share many similar features, the degree of change between them is calculated and added to the list. The ith image is matched to the (i+1)th image; if the two images share few feature points and cannot be matched, 1 is added to i and to the error counter, and SIFT feature extraction is performed again. If this process fails ten times, 1 is added to j and the process is performed again, taking j equivalent to i (that is, j = i). Using the above algorithm, the change in coordinates between the images used for composition can be calculated. The global coordinates of the entire composite are obtained by setting the smallest x and y values to zero and computing the cumulative sum up to each point.

Algorithm 1 Image Composite Software processing details
 1: i = 0, j = 0
 2: while i + j + 1 <= N do
 3:   A = Images[i]
 4:   j = 0
 5:   while j <= 10 do
 6:     B = Images[i+j+1]
 7:     if Find matching area with A then
 8:       A = Stitch B onto A
 9:       i = i + 1, j = 0
10:     else
11:       j = j + 1

Algorithm 2 Calculate the difference in coordinates between images
 1: i = 0
 2: coord = [], usedImages = []
 3: while j <= N do
 4:   j = i
 5:   error = 0
 6:   flag = True
 7:   while flag do
 8:     Extract SIFT features of Image[j] and Image[i+1]
 9:     if Two images could be feature matched then
10:       C = cal_coord_diff(Image[j], Image[i+1])
11:       coord.append(C)
12:       usedImages.append(Image[j], Image[i+1])
13:       i += 1
14:       flag = False
15:     else
16:       error += 1
17:       if error >= 10 then
18:         j += 1
19:       else
20:         i += 1
21: usedImages = list(set(usedImages))
22: return coord, usedImages
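A rough OpenCV sketch of the coordinate-difference step used in Algorithm 2 is given below, assuming grayscale frames; the SIFT and brute-force matcher calls are standard OpenCV, but the ratio-test threshold, the minimum match count, and the pure-translation model are illustrative choices rather than the paper's exact settings.

import cv2
import numpy as np

def coord_diff(img_a, img_b, min_matches=10):
    # Estimate the (dx, dy) shift between two frames from matched SIFT keypoints.
    # Returns None when the frames share too few features.
    sift = cv2.SIFT_create()
    kp_a, des_a = sift.detectAndCompute(img_a, None)
    kp_b, des_b = sift.detectAndCompute(img_b, None)
    if des_a is None or des_b is None:
        return None
    matches = cv2.BFMatcher(cv2.NORM_L2).knnMatch(des_a, des_b, k=2)
    good = [p[0] for p in matches
            if len(p) == 2 and p[0].distance < 0.75 * p[1].distance]  # Lowe's ratio test
    if len(good) < min_matches:
        return None
    shifts = np.array([np.array(kp_b[m.trainIdx].pt) - np.array(kp_a[m.queryIdx].pt)
                       for m in good])
    return shifts.mean(axis=0)

def to_global(coords):
    # Cumulative sum of per-pair shifts, offset so the smallest x and y are zero.
    pos = np.cumsum(np.asarray(coords), axis=0)
    return pos - pos.min(axis=0)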
3.6. Image sharpening process
We divided the combined panoramic image into 64 x 64 [pixel] grid regions. The image numbers of the constituent images and their coordinates were obtained from the coordinates of the panoramic image, and the coordinates of the constituent images at the upper-left corner of each grid region were extracted. Tenengrad values were calculated for the cropped images, and the crop with the highest Tenengrad value was pasted, at the extracted coordinates, onto a newly created blank image of the same size as the panoramic image. These processes were performed in all regions; pasting the image with the highest Tenengrad value among the overlapping images yields a clear image.
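Tenengrad is a gradient-based focus measure; the short sketch below, assuming grayscale tiles as NumPy arrays, uses the common Sobel-magnitude form and selects the sharpest of several overlapping crops. The exact variant used in the paper is not specified, so this is an illustrative implementation.

import cv2
import numpy as np

def tenengrad(gray):
    # Focus measure: mean squared Sobel gradient magnitude.
    gx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=3)
    gy = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=3)
    return float(np.mean(gx ** 2 + gy ** 2))

def sharpest(tiles):
    # Return the overlapping crop with the highest Tenengrad value.
    return max(tiles, key=tenengrad)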
3.7. Creating the Guttae Classifier
3.7.1. Creating Dataset
A large amount of data is required for training such networks. However, since the amount of data available in this study was small, it was necessary to augment the data. Additionally, because of the large size of the image data, the images would normally have to be reduced in size to be used in a dataset, and such resizing results in a loss of information. We therefore developed a new data augmentation method for small numbers of large images. The corneal endothelial cell image and the mask image showing the locations of the guttae were divided into a grid of 64 x 64 [pixels]. Each grid region was additionally clipped three times shifted by 16 [pixels] to the right and three times by 16 [pixels] toward the bottom, thereby shifting the image area; thus, 16 images were generated for a single grid region. Images that did not contain guttae were excluded from the dataset.
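A sketch of this grid-shift augmentation is shown below, assuming the image and its guttae mask are NumPy arrays of equal size; the 64-pixel tiles and 16-pixel shifts follow the text, while the guttae-presence check is written as a simple nonzero test on the mask.

import numpy as np

def grid_shift_crops(image, mask, tile=64, step=16, shifts=4):
    # Yield (image_crop, mask_crop) pairs: each 64x64 grid cell plus copies
    # shifted by 16 px to the right and downward (4 x 4 = 16 per cell).
    # Crops whose mask contains no guttae are skipped.
    h, w = image.shape[:2]
    for y0 in range(0, h - tile + 1, tile):
        for x0 in range(0, w - tile + 1, tile):
            for dy in range(shifts):
                for dx in range(shifts):
                    y, x = y0 + dy * step, x0 + dx * step
                    if y + tile > h or x + tile > w:
                        continue
                    m = mask[y:y + tile, x:x + tile]
                    if not m.any():  # exclude crops without guttae
                        continue
                    yield image[y:y + tile, x:x + tile], m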
3.7.2. Training and Prediction in CNN
Twenty-one corneal endothelial cell images, each with a mask image showing the locations of the guttae, were prepared (Fig. 1, Dataset 2). Segmentation Models PyTorch, a Python library for implementing segmentation-specific CNNs, was used in the experiments. The best encoder backbone was determined from among ResNet18, ResNet34, ResNet50, VGG11, and VGG16. The twenty-one images were divided into two sets of eighteen and three images; eighteen were used to develop the model and three were used to determine the backbone. The node weights of ImageNet were transferred to the model and fine-tuning was performed. A U-Net [14] whose encoder was replaced with ResNet50 [15] was the best backbone. Adam [16] was used as the optimization function with an initial learning rate of 1e-4, and the loss function was the least-squares error. Predictions were made on the 64 x 64 [pixel] images extracted during the sharpening process and pasted back to their original positions, so that the locations of the guttae were predicted for the panoramic image.
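A condensed training sketch with Segmentation Models PyTorch is given below, assuming the 64x64 single-channel tiles and masks are already stacked into tensors X and Y; the U-Net with a ResNet50 encoder, ImageNet initialization, Adam at lr 1e-4, and a least-squares (MSE) loss follow the text, while the epoch count, batch size, and data handling are illustrative.

import torch
import segmentation_models_pytorch as smp
from torch.utils.data import DataLoader, TensorDataset

model = smp.Unet(encoder_name="resnet50", encoder_weights="imagenet",
                 in_channels=1, classes=1)            # U-Net with ResNet50 encoder
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
criterion = torch.nn.MSELoss()                         # least-squares error on the mask

def train(X, Y, epochs=10, batch_size=8):
    loader = DataLoader(TensorDataset(X, Y), batch_size=batch_size, shuffle=True)
    model.train()
    for _ in range(epochs):
        for xb, yb in loader:
            optimizer.zero_grad()
            loss = criterion(torch.sigmoid(model(xb)), yb)
            loss.backward()
            optimizer.step()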
4. RESULTS AND DISCUSSION
4.1. Comparison of algorithms implemented in ICS and OpenCV
An example of a mouse corneal endothelial cell is shown in Fig. 2A. The images synthesized using ICS were more circular than those synthesized using OpenCV, indicating that the synthesis was performed correctly. On the other hand, images synthesized using the OpenCV algorithm were not circular and were often pasted in incorrect locations. An example of the synthesis result of the algorithm implemented in OpenCV for artificial cornea data is shown in Fig. 2B. This is the synthesis result when the focus is perfectly aligned and there are no blurred images. The result is almost the same as the correct data, with no unnatural overlap between the composite images, indicating that compositing was performed correctly. If the in-focus images are well extracted, they can be integrated well using OpenCV. Overall, ICS is the more robust method.

Fig. 2. A: Comparison of results with Image Composite Software and software implemented in OpenCV: (a) panoramic images by Image Composite Software, (b) panoramic images by OpenCV. B: Results with the OpenCV-implemented software on artificial cornea data: (a) truth (artificial cornea image), (b) panoramic image by OpenCV; (a) shows the composite result and (b) shows the overlap of the component images.
4.2. Synthesis results with ICS
As previously mentioned, when taking moving images of the mouse cornea with the contact specular microscope, the images are taken upward from the center of the cornea, then leftward along the edge of the cornea, and downward once around the edge; the cornea is then photographed clockwise and back down to the center. For the 94 videos, the left- and right-rotated portions were split, and for each case an integrated image was created using ICS. The results are presented in Table 1. An image was classified as "image dropout" if the center of the image was missing, "distorted shape" if the shape was distorted instead of circular, and "unnatural paste" if an image was pasted in an unnatural location. On the other hand, an image in which the shape of the cornea in the combined panoramic image remained circular, the center was not missing, and there were no unnaturally pasted parts was classified as a "success".

Table 1. Synthesis results with ICS
Result            Left   Right
image dropout      15     20
distorted shape     6     12
unnatural paste    10      6
success            63     56

The number of cases in which either the right- or left-rotated part was correctly merged was 75. Fig. 3 compares the results of the manual and ICS syntheses. Among the videos that failed to be synthesized using ICS, those with distorted shapes or unnatural pasting were verified to contain frames that deviated from the cornea owing to contamination on the specular microscope or a shaking camera. It is considered that the stains themselves became feature points and the pasting of the composite part failed. Additionally, significant blurring caused large changes in image position, resulting in unnatural pasting positions and a distorted overall shape. The results of the ICS and manually composited images were almost identical, suggesting that there was no problem with the image-compositing algorithm and that, in the failed cases, part of the mouse cornea was simply not captured by the specular microscope in the first place. The image is taken from the center of the mouse cornea in an upward direction, followed by leftward rotation along the edge of the cornea and then a downward pass from the point where the image has gone around the edge; if an area in the center of the cornea is not photographed at this time, that part of the image will be missing.
4.3. Segmentation of guttae in panoramic images
The model that detects the guttae locations was applied to the panoramic images of the videos (Fig. 1, Dataset 1). Fig. 4 shows two prediction examples of the guttae positions in a panoramic image: (c) and (d) are the prediction results for (a) and (b), respectively. The original panoramic images were combined using ICS, and the edges of the corneas were cropped after sharpening. Segmentation was performed on the images synthesized from the entire cornea using ICS. Currently, the 21 still images and the mask images annotated with guttae are divided into training and validation datasets for segmentation, and the model was evaluated by comparing the validation data with the predicted results. Fig. 4 shows that while the segmentation of the likely guttae is successful, the model also partially predicts guttae at the edges of the cornea. Further quantitative evaluation is essential; however, it may pose some limitations for future research. For this purpose, it is necessary to prepare images to which the ground truth of the guttae is assigned.

Fig. 3. Integrated image results by Image Composite Software and manual compositing: (a) panoramic image by Image Composite Software (integrated successfully), (b) panoramic image by Image Composite Software (hole in the center), (c) panoramic image composited manually (integrated successfully), (d) panoramic image composited manually (hole in the center). The left side of each represents the composite result, and the right side represents the overlap of the component images.

Fig. 4. Two examples of panoramic images and guttae locations: (a) panoramic image with corneal rim removed, (b) prediction results for guttae location, (c) panoramic image with corneal rim removed, (d) prediction results for guttae location.
4.4. Discussion
It was observed that the algorithm implemented in OpenCV could not correctly synthesize results from the mouse corneal endothelial cell images: compared with the artificial cornea data, the images were pasted in unnatural positions. The significant difference between the mouse corneal endothelial cell image data and the artificial cornea data is that, with the artificial cornea data, all images are in focus and none are blurred. For the mouse corneal endothelial cell data, the in-focus frames were extracted at equal intervals in chronological order, which also resulted in the extraction of out-of-focus images. In contrast, the ICS-based method synthesized the images more accurately than the OpenCV-based software because ICS uses a feature extraction method appropriate for corneal endothelial cells, whereas the OpenCV synthesis uses SIFT for feature extraction; when the extracted frames are in focus, the SIFT-based synthesis also proceeds well. Until now, it has not been possible to obtain images of the entire cornea because of the narrow imaging range of specular microscopy. This study suggests that it is possible to obtain a composite image of the entire cornea by extracting relatively focused images from a video of the entire cornea.
5. CONCLUSIONS AND FUTURE WORK
At present, the status of the entire cornea is inferred from its center during the diagnosis and observation of corneal endothelial cells with specular microscopy. If images of the entire cornea could be obtained, more studies would be possible. In this study, we proposed a framework for generating images of the entire cornea from videos captured using contact specular microscopy. Focused images were extracted from the video and a panoramic composite image was generated. Furthermore, we constructed a learning model, U-Net, to extract the guttae from the entire image. To study the effectiveness of the proposed framework, we implemented it and applied it to corneal data from a mouse model of FECD. The panorama synthesis applications used in the implementation were our custom-built ICS and an algorithm implemented with OpenCV, an open-source library. Artificial corneal images were synthesized with no unnatural aspects in the results. However, some of the extracted image sets were not correctly synthesized when they contained blurred images, whereas many images were correctly synthesized using ICS. After the panorama was merged, the image was divided into a grid; the most in-focus images were extracted and pasted, resulting in a sharper image than the previous output obtained using ICS. Using the extracted images within each region, we could also predict the guttae locations. Although the implementation and application of the method to the data in this study confirmed its effectiveness, few quantitative evaluations have been performed. Quantitative evaluation, such as of the accuracy of the implementation, is an issue for the future.
REFERENCES
[1] Allen O. Eghrari, S. Amer Riazuddin, and John D. Gottsch. Fuchs corneal dystrophy. Progress in Molecular Biology and Translational Science, 134:79–97, 2015.
[2] Gargi Gouranga Nanda and Debasmita Pankaj Alone. Current understanding of the pathogenesis of Fuchs' endothelial corneal dystrophy. Molecular Vision, 25:295, 2019.
[3] Philippe Gain, Rémy Jullienne, Zhiguo He, Mansour Aldossary, Sophie Acquart, Fabrice Cognasse, and Gilles Thuret. Global survey of corneal transplantation and eye banking. JAMA Ophthalmology, 134(2):167–173, 2016.
[4] Naoki Okumura, Shigeru Kinoshita, and Noriko Koizumi. Application of Rho kinase inhibitors for the treatment of corneal endothelial diseases. Journal of Ophthalmology, 2017, 2017.
[5] Naoki Okumura, Shigeru Kinoshita, and Noriko Koizumi. The role of Rho kinase inhibitors in corneal endothelial dysfunction. Current Pharmaceutical Design, 23(4):660–666, 2017.
[6] Jay W. McLaren, Lori A. Bachman, Katrina M. Kane, and Sanjay V. Patel. Objective assessment of the corneal endothelium in Fuchs' endothelial dystrophy. Investigative Ophthalmology & Visual Science, 55(2):1184–1190, 2014.
[7] Naoki Okumura, Shohei Yamada, Takeru Nishikawa, Kaito Narimoto, Kengo Okamura, Ayaka Izumi, Satoru Hiwa, Tomoyuki Hiroyasu, and Noriko Koizumi. U-Net convolutional neural network for segmenting the corneal endothelium in a mouse model of Fuchs endothelial corneal dystrophy. Cornea, 2021.
[8] Takeru Nishikawa, Naoki Okumura, Kaito Narimoto, Shohei Yamada, Kengo Okamura, Ayaka Izumi, and Noriko Koizumi. Deep neural network for the analysis of guttae via semi-supervised learning in a Fuchs endothelial corneal dystrophy mouse model. Investigative Ophthalmology & Visual Science, 62(8):826–826, 2021.
[9] Hiroshi Tanaka, Naoki Okumura, Noriko Koizumi, Chie Sotozono, Yasuhiro Sumii, and Shigeru Kinoshita. Panoramic view of human corneal endothelial cell layer observed by a prototype slit-scanning wide-field contact specular microscope. British Journal of Ophthalmology, 101(5):655–659, 2017.
[10] Matthew Brown and David G. Lowe. Automatic panoramic image stitching using invariant features. International Journal of Computer Vision, 74(1):59–73, 2007.
[11] David G. Lowe. Distinctive image features from scale-invariant keypoints. International Journal of Computer Vision, 60(2):91–110, 2004.
[12] Eric Krotkov. Focusing. International Journal of Computer Vision, 1(3):223–237, 1988.
[13] Said Pertuz, Domenec Puig, and Miguel Angel Garcia. Analysis of focus measure operators for shape-from-focus. Pattern Recognition, 46(5):1415–1432, 2013.
[14] Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-Net: Convolutional networks for biomedical image segmentation. In International Conference on Medical Image Computing and Computer-Assisted Intervention, pages 234–241. Springer, 2015.
[15] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 770–778, 2016.
[16] Diederik P. Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014.
4tE0T4oBgHgl3EQfegAX/content/tmp_files/2301.02390v1.pdf.txt ADDED
@@ -0,0 +1,491 @@
arXiv:2301.02390v1 [eess.IV] 6 Jan 2023

Deep-learning models in medical image analysis: Detection of esophagitis from the Kvasir Dataset

Kyoka Yoshioka1†, Kensuke Tanioka2, Satoru Hiwa2 and Tomoyuki Hiroyasu2
1Graduate School of Life and Medical Sciences, Doshisha University, Kyoto, Japan
2Department of Biomedical Sciences and Informatics, Doshisha University, Kyoto, Japan
(Tel: +81-774-65-6020; E-mail: tomo@is.doshisha.ac.jp)

Abstract: Early detection of esophagitis is important because this condition can progress to cancer if left untreated. However, the accuracies of different deep learning models in detecting esophagitis have yet to be compared. Thus, this study aimed to compare the accuracies of convolutional neural network models (GoogLeNet, ResNet-50, MobileNet V2, and MobileNet V3) in detecting esophagitis from the open Kvasir dataset of endoscopic images. Results showed that among the models, GoogLeNet achieved the highest F1-score. Based on the average true positive rate, MobileNet V3 predicted esophagitis more confidently than the other models. The trained models were also examined using SHapley Additive exPlanations and Gradient-weighted Class Activation Mapping.

Keywords: Kvasir dataset, Deep Learning, Convolutional Neural Networks, Gradient-Weighted Class Activation Mapping, SHAP, SHapley Additive exPlanations
1. INTRODUCTION
With the development of artificial intelligence (AI), several studies have focused on the application of this technology in the medical field. In gastroenterology, AI is used to detect inflammation, polyps, and stomach cancer and to develop systems that can automatically determine the severity of symptoms [1][2][3][4]. AI models are expected to improve diagnostic accuracy and reduce medical costs by preventing misdiagnosis by humans.
Various deep learning and AI models, including deep-learning convolutional neural network (CNN) models, have been proposed and used for medical image recognition and analysis. However, these models differ in accuracy, and comparing this aspect is important to identify which model is suitable for a specific application in endoscopic imaging.
The z-line is an anatomic landmark located at the junction of the esophagus and stomach. Esophagitis is an inflammation of the esophagus that appears as a break in the esophageal mucosa relative to the z-line [5]. The z-line and esophagitis can thus be described as normal and diseased conditions, respectively. Early detection of esophagitis is necessary because this condition can cause complications (e.g., esophageal ulcer, bleeding, and stricture) and progress to cancer if left untreated. Therefore, distinguishing between the z-line and esophagitis is necessary; however, this is difficult [6]. In addition, the accuracies of various models in detecting esophagitis have yet to be compared.
Thus, this study aimed to compare the accuracies of several CNN models, namely GoogLeNet [7], ResNet-50 [8], MobileNet V2 [9], and MobileNet V3 [10], in identifying z-lines and esophagitis in endoscopic images from the open Kvasir dataset. These models have received considerable attention in recent years; GoogLeNet and ResNet are successive winners of the ImageNet Large Scale Visual Recognition Challenge (ILSVRC), a competition using a large image recognition dataset. The results obtained by the four CNN models were compared, and the trained models were also examined with the explainable artificial intelligence (XAI) methods Gradient-weighted Class Activation Mapping (Grad-CAM) [11] and SHapley Additive exPlanations (SHAP) [12].
† Kyoka Yoshioka is the presenter of this paper.
2. DEEP LEARNING IN MEDICAL IMAGE ANALYSIS
2.1. Typical architectures for image classification
The CNN is a deep learning method specialized for image recognition and is widely used for identifying lesion sites in medical images. It stacks convolutional and pooling layers and finally combines their outputs to generate the classification results. In this study, we compared the results of different CNN models used for site identification in medical images. The CNN models used were GoogLeNet and ResNet, successive winning models of the ILSVRC, and MobileNet V2 and MobileNet V3, which have attracted considerable attention in recent years because of their small computational and memory requirements.
2.1.1. GoogLeNet
GoogLeNet was the winning model at the ILSVRC in 2014. The model consists of Inception modules, 1x1 convolutions, auxiliary losses, and global average pooling. GoogLeNet can be made deep by stacking Inception modules, and a 1x1 convolution is performed before each convolution to reduce the dimensionality caused by the large number of parameters. The Inception module processes data with multiple filters in parallel. The fully connected layer is removed to increase the width and depth of the network, global average pooling is used in its place to avoid gradient loss, and classification is also performed on sub-networks branched from the middle of the network through the auxiliary losses [7].
2.1.2. ResNet
ResNet was the winning model at the ILSVRC in 2015. The problem of learning failing to progress because of vanishing gradients and degradation was solved with residual blocks, allowing very deep networks of up to 152 layers. The key features of this model are the residual block with its shortcut connection and batch normalization. ResNet is available in several variants with different depths. ResNet-50 shows higher accuracy than GoogLeNet in ImageNet classification [8]; however, it requires about twice as many parameters as GoogLeNet.
2.1.3. MobileNet V2
MobileNet is a model with small computational and memory requirements that can adjust the trade-off between accuracy and computational load. Depthwise separable convolution decomposes a convolution layer into a depthwise and a pointwise convolution, which reduces the computation cost. Furthermore, V2 introduces expand/projection layers, which rapidly increase or decrease the number of channels, and inverted residual blocks. MobileNet V2 achieves accuracy comparable to GoogLeNet and ResNet-50 in ImageNet classification while significantly reducing the number of parameters [9].
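A small PyTorch sketch of the depthwise separable convolution described above; the channel counts and kernel size are arbitrary examples rather than the exact MobileNet configuration.

import torch.nn as nn

class DepthwiseSeparableConv(nn.Module):
    # 3x3 depthwise convolution (one filter per input channel) followed by a
    # 1x1 pointwise convolution that mixes channels.
    def __init__(self, in_ch, out_ch, stride=1):
        super().__init__()
        self.depthwise = nn.Conv2d(in_ch, in_ch, kernel_size=3, stride=stride,
                                   padding=1, groups=in_ch, bias=False)
        self.pointwise = nn.Conv2d(in_ch, out_ch, kernel_size=1, bias=False)

    def forward(self, x):
        return self.pointwise(self.depthwise(x))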
2.1.4. MobileNet V3
MobileNet V3 is an improved version of MobileNet V2 that introduces a squeeze-and-excitation structure (SE-block) into the inverted residual block, one of the features of MobileNet V2. The SE-block improves the expressiveness of the model by weighting information in the channel direction [13]. Compared with V2, MobileNet V3 achieves more accurate ImageNet classification while shortening the total inference time [10].
2.2. Explainable AI (XAI)
The CNN models were also examined with the XAI methods Grad-CAM and SHAP, and the Discussion section explains the results obtained with these techniques.
2.2.1. Grad-CAM
Grad-CAM displays a color map of the areas the CNN attends to when making a classification [11]. It is based on the fact that variables with large gradients with respect to the output value of the predicted class are essential for the prediction. The gradient of the predicted class score with respect to the feature maps of the last convolutional layer is used.
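A compact sketch of the Grad-CAM computation for the last convolutional block is shown below, assuming a torchvision GoogLeNet; hooking inception5b and the 224x224 input are illustrative choices, and production use would normally rely on an existing Grad-CAM library.

import torch
import torch.nn.functional as F
from torchvision import models

model = models.googlenet(weights="IMAGENET1K_V1").eval()
feats, grads = {}, {}
layer = model.inception5b  # last Inception block, used here as the "last conv layer"
layer.register_forward_hook(lambda m, i, o: feats.update(a=o))
layer.register_full_backward_hook(lambda m, gi, go: grads.update(a=go[0]))

def grad_cam(x, class_idx=None):
    # x: (1, 3, 224, 224) normalized image tensor.
    scores = model(x)
    idx = scores.argmax(dim=1) if class_idx is None else torch.tensor([class_idx])
    model.zero_grad()
    scores[0, idx].sum().backward()
    w = grads["a"].mean(dim=(2, 3), keepdim=True)            # pooled gradients per channel
    cam = F.relu((w * feats["a"]).sum(dim=1, keepdim=True))   # weighted sum of feature maps
    cam = F.interpolate(cam, size=x.shape[-2:], mode="bilinear", align_corners=False)
    return (cam - cam.min()) / (cam.max() - cam.min() + 1e-8)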
2.2.2. SHAP
SHAP calculates, for each predicted value, how each characteristic variable affects that prediction [12]. This analysis allows us to visualize the impact of an increase or decrease in the value of a given characteristic variable.
3. MATERIALS AND METHODS
The CNN models GoogLeNet, ResNet-50, MobileNet V2, and MobileNet V3 were employed to detect esophagitis from the open Kvasir dataset of endoscopic images, and their results were compared.
3.1. Kvasir dataset
The Kvasir dataset is a collection of endoscopic images of the gastrointestinal tract, annotated and validated by certified endoscopists. The dataset was made available in the fall of 2017 through the Medical Multimedia Challenge provided by MediaEval. It includes anatomical landmarks (pylorus, z-line, and cecum), disease states (esophagitis, ulcerative colitis, and polyps), and medical procedures (dyed lifted polyps and dyed resection margins). The resolution of the images in these eight classes varies from 720x576 to 1920x1072 pixels, and each image has a different shooting angle, resolution, brightness, magnification, and center point.
3.2. Preprocessing
Image preprocessing was performed before training the models. Edge artifacts and annotations that interfere with learning during the analysis of medical images were removed. A mask image was created in which pixels with luminance values below a certain threshold were set to 0, an opening operation was applied to the mask to remove the annotations, and the image was cropped using this final mask to obtain the target area. This process was performed on all data.
Each image in the dataset has a different resolution, so all images were resized to 224x224 pixels by bilinear interpolation to fit the deep-learning input. In addition, data augmentation was applied to the training data using two operations: horizontal and vertical flips.
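An OpenCV sketch of these steps, assuming BGR images read with cv2.imread; the luminance threshold and opening-kernel size are illustrative values, since the exact settings are not stated.

import cv2
import numpy as np

def preprocess(img, thresh=30, kernel_size=15, out_size=(224, 224)):
    # Build a mask that zeroes dark pixels, open it to remove small
    # annotation regions, crop to the remaining area, and resize.
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    mask = (gray > thresh).astype(np.uint8)
    kernel = np.ones((kernel_size, kernel_size), np.uint8)
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
    ys, xs = np.where(mask > 0)
    crop = img[ys.min():ys.max() + 1, xs.min():xs.max() + 1]
    return cv2.resize(crop, out_size, interpolation=cv2.INTER_LINEAR)  # bilinear

def augment(img):
    # Training-time augmentation: horizontal and vertical flips.
    return [img, cv2.flip(img, 1), cv2.flip(img, 0)]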
3.3. Cross Validation
A total of 1000 images containing z-lines and esophagitis were partitioned into test, training, and validation data. First, 25% (n = 250) of the data were randomly selected as test data. Of the remaining data (75%, n = 750), 50% of the total (n = 500) was used for training and 25% (n = 250) for validation.
The inner loop consisted of the training and validation data: the model was trained on the training data, and parameters such as the optimal number of epochs were determined on the validation data. In this way, four trained models were generated. Each model was evaluated on its test data, and the average discrimination accuracy over the four runs was used as the evaluation value of the CNN model. The test, training, and validation data were each partitioned so as to maintain the class proportions.
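A scikit-learn sketch of one such stratified 25/50/25 split is given below; the four repetitions with different seeds correspond to the four trained models, and the file names and labels are placeholders.

from sklearn.model_selection import train_test_split

def split_once(X, y, seed):
    # 25% test, then 50% train / 25% validation of the full set, all stratified.
    X_rest, X_test, y_rest, y_test = train_test_split(
        X, y, test_size=0.25, stratify=y, random_state=seed)
    X_train, X_val, y_train, y_val = train_test_split(
        X_rest, y_rest, test_size=1/3, stratify=y_rest, random_state=seed)
    return (X_train, y_train), (X_val, y_val), (X_test, y_test)

X = [f"img_{i:04d}.jpg" for i in range(1000)]   # placeholder file names
y = [0] * 500 + [1] * 500                       # 0 = z-line, 1 = esophagitis
splits = [split_once(X, y, seed) for seed in range(4)]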
3.4. CNN models
PyTorch was used for the implementation of GoogLeNet, ResNet-50, MobileNet V2, and MobileNet V3. The initial values of all model parameters were pre-trained on ImageNet, and the models were trained by fine-tuning.
For all models, the Adam optimizer was used for training, the batch size was five, and the maximum number of epochs was 100. The cross-entropy error shown in equation (1) was used as the loss function.

E(x) = − Σ_{n=1}^{N} Σ_{k=1}^{K} d_{nk} log y_k(x_n; w)    (1)
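A skeleton of the fine-tuning setup in PyTorch is shown below, using ResNet-50 as the example; the Adam optimizer, batch size of five, 100-epoch maximum, and cross-entropy loss follow the text, while the head replacement, learning rate, and data pipeline are illustrative.

import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import models

def build_model(num_classes=2):
    model = models.resnet50(weights="IMAGENET1K_V1")          # ImageNet-pretrained weights
    model.fc = nn.Linear(model.fc.in_features, num_classes)   # two classes: z-line, esophagitis
    return model

def fine_tune(model, train_set, max_epochs=100, batch_size=5, device="cpu"):
    model.to(device)
    optimizer = torch.optim.Adam(model.parameters())
    criterion = nn.CrossEntropyLoss()                          # Eq. (1)
    loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)
    for epoch in range(max_epochs):
        model.train()
        for xb, yb in loader:
            xb, yb = xb.to(device), yb.to(device)
            optimizer.zero_grad()
            loss = criterion(model(xb), yb)
            loss.backward()
            optimizer.step()
        # the optimal number of epochs is chosen on the validation data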
209
+ 3.5. Evaluation Function
210
+ Five evaluation indices were used in this experiment: ac-
211
+ curacy, precision, recall, specificity, and F1-score. These
212
+ metrics were calculated using the confusion matrix shown
213
+ in Table 1.
214
+ Table 1. Confusion matrix for a two-class problem
+
+                            Predicted Class        Predicted Class
+                            (Positive Class)       (Negative Class)
+ Actual Class (Positive)    True Positive          False Negative
+ Actual Class (Negative)    False Positive         True Negative
227
+ In this experiment, the z-line and esophagitis were judged
228
+ as the negative and positive classes, respectively. In other
229
+ words, esophagitis images judged by the trained model to be esophagitis
+ or z-line were designated true positive (TP) or false negative (FN),
+ respectively. Meanwhile, z-line images judged to be esophagitis or
+ z-line were designated false positive (FP) or true negative (TN),
+ respectively.
236
+ from the confusion matrix, the accuracy, precision, recall,
237
+ specificity, and F1-score of the models were calculated using
238
+ Equations (2) to (6).
239
+ Accuracy = (TP + TN) / (TP + FP + FN + TN)    (2)
+ Precision = TP / (TP + FP)    (3)
+ Recall = TP / (TP + FN)    (4)
+ Specificity = TN / (TN + FP)    (5)
+ F1-score = 2TP / (2TP + FP + FN)    (6)
259
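+ Given the counts from the confusion matrix in Table 1, these indices can
+ be computed directly, as in the short sketch below (esophagitis is the
+ positive class).
+
+ def evaluate(tp, fp, tn, fn):
+     accuracy    = (tp + tn) / (tp + fp + fn + tn)   # Eq. (2)
+     precision   = tp / (tp + fp)                    # Eq. (3)
+     recall      = tp / (tp + fn)                    # Eq. (4), true positive rate
+     specificity = tn / (tn + fp)                    # Eq. (5)
+     f1          = 2 * tp / (2 * tp + fp + fn)       # Eq. (6)
+     return accuracy, precision, recall, specificity, f1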
+ 4. RESULTS AND DISCUSSIONS
260
+ 4.1. Performance comparison between different archi-
261
+ tectures
262
+ The evaluation indices obtained from the experiments are
263
+ shown in Table 2.
264
+ The F1-score results in Table 2 show that GoogLeNet was
265
+ the best among the four models. In other words, GoogLeNet
266
+ was more reliable in predicting esophagitis than the other
267
+ models. Meanwhile, MobileNet V3 showed the highest pre-
268
+ cision and specificity. In other words, MobileNet V3 was
269
+ the most accurate among the tested models for z-line predic-
270
+ tion. From a medical point of view, an ideal model should be
271
+ Table 2. Performance comparison between different architectures
+
+ Model          ACC     PREC    REC     SPEC    F1
+ GoogLeNet      0.846   0.859   0.830   0.862   0.843
+ MobileNet V3   0.842   0.901   0.776   0.908   0.831
+ ResNet-50      0.833   0.865   0.792   0.874   0.826
+ MobileNet V2   0.830   0.852   0.800   0.860   0.825
303
+ able to distinguish esophagitis with severe symptoms from
304
+ the z-line.
305
+ The average TP rates were 0.950, 0.923, 0.892, and
306
+ 0.841 for MobileNet V3, MobileNet V2, GoogLeNet, and
307
+ ResNet-50, respectively. MobileNet V3 predicted esophagi-
308
+ tis with more confidence than the other models.
309
+ 4.2. GoogLeNet analysis
310
+ Grad-CAM and SHAP were applied to the learned model,
311
+ and the basis of its predictions was examined.
312
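+ The Grad-CAM computation can be sketched as below for a PyTorch model;
+ this is a minimal re-implementation for illustration, not the exact code
+ used in the paper. For the torchvision GoogLeNet, the last Inception block
+ (model.inception5b) is a natural choice of target layer.
+
+ import torch
+ import torch.nn.functional as F
+
+ def grad_cam(model, x, target_layer, class_idx=None):
+     # Capture the target layer's activations and output gradients.
+     acts, grads = {}, {}
+     h1 = target_layer.register_forward_hook(
+         lambda m, i, o: acts.update(v=o))
+     h2 = target_layer.register_full_backward_hook(
+         lambda m, gi, go: grads.update(v=go[0]))
+     model.eval()
+     scores = model(x)                      # x: (1, 3, 224, 224) tensor
+     if class_idx is None:
+         class_idx = scores.argmax(dim=1).item()
+     model.zero_grad()
+     scores[0, class_idx].backward()
+     h1.remove(); h2.remove()
+     # Weight each channel by the spatial average of its gradient.
+     weights = grads["v"].mean(dim=(2, 3), keepdim=True)
+     cam = F.relu((weights * acts["v"]).sum(dim=1, keepdim=True))
+     cam = F.interpolate(cam, size=x.shape[2:], mode="bilinear",
+                         align_corners=False)
+     return (cam - cam.min()) / (cam.max() - cam.min() + 1e-8)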
+ Fig.1 shows an example of the image results in the case of
313
+ TP predicted by GoogLeNet. In the Grad-CAM results, red
314
+ indicates the most potent activation, and blue indicates the
315
+ weakest activation. In the SHAP results, the SHAP values
316
+ of the patches were computed and rendered in a color map:
317
+ a positive SHAP value (red) indicates that the class is sup-
318
+ ported. By contrast, a negative SHAP value (blue) indicates
319
+ that the class is rejected.
320
+ A tear in the esophageal mucosa relative to the z-line is a
321
+ feature of esophagitis.
322
+ According to Fig.1, the results of
323
+ Grad-CAM and SHAP showed that the learned model of
324
+ GoogLeNet can make predictions focusing on the clinically
325
+ significant aspects of esophagitis images. The GoogLeNet
326
+ model learned the findings that are important for diagnosing
327
+ esophagitis. Comparison results showed that SHAP captured
328
+ the location of multiple mucosal tears in the image more ac-
329
+ curately than Grad-CAM.
330
+ Fig.2 shows the results of applying Grad-CAM and SHAP
331
+ in the FN case. The following can be observed from the re-
332
+ sults of Grad-CAM and SHAP for Fig.2, respectively. In the
333
+ Grad-CAM results, most areas in the image are shown as
334
+ activated regions. Areas that provide the basis for the pre-
335
+ diction are difficult to identify because of the gradient satu-
336
+ ration in the Grad-CAM calculation. In the SHAP results,
337
+ the inflammatory areas of the input image are indicated by
338
+ blue pixels. Blue pixels indicate features that have a negative
339
+ contribution to the prediction. In other words, although the
340
+ model incorrectly identified esophagitis as a z-line, the model
341
+ recognized that areas in the image negatively contributed to
342
+ the z-line decision.
343
+ 4.3. MobileNet V3 analysis
344
+ One hundred images were determined to be TP in the
345
+ MobileNet V3 model.
346
+ The SHAP results for the images
347
+ judged to have the highest and lowest probabilities of being
348
+ esophagitis are shown in Fig.3.
349
+ As shown in Fig.3, in cases with a high prediction proba-
350
+ bility, some features may have a negative contribution to the
351
+
352
+ Fig. 1. True Positive Pattern: (a) Raw image, (b) Grad-CAM, (c) SHAP
+ Fig. 2. False Negative Pattern: (a) Raw image, (b) Grad-CAM, (c) SHAP
360
+ Fig. 3. First image predicted positive with 1.000 probability, and second image predicted positive with 0.524 probability.
361
+ prediction. Many features showing negative contributions
362
+ can be identified in the images with low prediction proba-
363
+ bility in Fig.3. These negative contributions may explain why the
+ prediction probability is low.
365
+ 5. CONCLUSIONS
366
+ We compared the accuracies of CNN models, including
367
+ GoogLeNet, ResNet-50, MobileNet V2, and MobileNet V3,
368
+ in identifying z-line and esophagitis in endoscopic images
369
+ from the open Kvasir dataset.
370
+ Among the four models,
371
+ GoogLeNet had the highest F1-score, and MobileNet V3
372
+ had the highest average TP rate. These results suggest that
373
+ GoogLeNet performs better than state-of-the-art CNN mod-
374
+ els in medical image recognition. In addition, MoblieNet V3
375
+ is a cost-effective model because of its low memory and short
376
+ training time. Each model was analyzed and compared with
377
+ Grad-CAM and SHAP. Other models, datasets, and model
378
+ analyses are warranted for verification.
379
+ REFERENCES
380
+ [1]
381
+ Peng-Jen Chen, Meng-Chiung Lin, Mei-Ju Lai, Jung-
382
+ Chun Lin, Henry Horng-Shing Lu, and Vincent S
383
+ Tseng. Accurate classification of diminutive colorectal
384
+ polyps using computer-aided analysis. Gastroenterol-
385
+ ogy, 154(3):568–575, 2018.
386
+ [2]
387
+ Toshiaki Hirasawa, Kazuharu Aoyama, Tetsuya Tan-
388
+ imoto, Soichiro Ishihara, Satoki Shichijo, Tsuyoshi
389
+ Ozawa, Tatsuya Ohnishi, Mitsuhiro Fujishiro, Keigo
390
+ Matsuo, Junko Fujisaki, et al. Application of artificial
391
+ intelligence using a convolutional neural network for
392
+ detecting gastric cancer in endoscopic images. Gastric
393
+ Cancer, 21(4):653–660, 2018.
394
+ [3]
418
+ Pedro Guimarães, Andreas Keller, Tobias Fehlmann,
419
+ Frank Lammert, and Markus Casper.
420
+ Deep-learning
421
+ based detection of gastric precancerous conditions.
422
+ Gut, 69(1):4–6, 2020.
423
+ [4]
424
+ Yaqiong Zhang, Fengxia Li, Fuqiang Yuan, Kai Zhang,
425
+ Lijuan Huo, Zichen Dong, Yiming Lang, Yapeng
426
+ Zhang, Meihong Wang, Zenghui Gao, et al. Diagnosing
427
+ chronic atrophic gastritis by gastroscopy using artificial
428
+ intelligence. Digestive and Liver Disease, 52(5):566–
429
+ 572, 2020.
430
+ [5]
431
+ Konstantin
432
+ Pogorelov,
433
+ Kristin
434
+ Ranheim
435
+ Randel,
436
+ Carsten Griwodz, Sigrun Losada Eskeland, Thomas
437
+ de Lange,
438
+ Dag Johansen,
439
+ Concetto Spampinato,
440
+ Duc-Tien Dang-Nguyen, Mathias Lux, Peter Thelin
441
+ Schmidt, et al. Kvasir: A multi-class image dataset for
442
+ computer aided gastrointestinal disease detection. In
443
+ Proceedings of the 8th ACM on Multimedia Systems
444
+ Conference, pages 164–169, 2017.
445
+ [6]
446
+ Timothy Cogan, Maribeth Cogan, and Lakshman
447
+ Tamil.
448
+ Mapgi:
449
+ accurate identification of anatomi-
450
+ cal landmarks and diseased tissue in gastrointestinal
451
+ tract using deep learning. Computers in biology and
452
+ medicine, 111:103351, 2019.
453
+ [7]
454
+ Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Ser-
455
+ manet, Scott Reed, Dragomir Anguelov, Dumitru Er-
456
+ han, Vincent Vanhoucke, and Andrew Rabinovich. Go-
457
+ ing deeper with convolutions. In Proceedings of the
458
+ IEEE conference on computer vision and pattern recog-
459
+ nition, pages 1–9, 2015.
460
+ [8]
461
+ Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian
462
+ Sun. Deep residual learning for image recognition. In
463
+ Proceedings of the IEEE conference on computer vision
464
+ and pattern recognition, pages 770–778, 2016.
465
+ [9]
466
+ Mark Sandler, Andrew Howard, Menglong Zhu, An-
467
+ drey Zhmoginov, and Liang-Chieh Chen. Mobilenetv2:
468
+ Inverted residuals and linear bottlenecks. In Proceed-
469
+ ings of the IEEE conference on computer vision and
470
+ pattern recognition, pages 4510–4520, 2018.
471
+ [10] Andrew Howard, Mark Sandler, Grace Chu, Liang-
472
+ Chieh Chen, Bo Chen, Mingxing Tan, Weijun Wang,
473
+ Yukun Zhu, Ruoming Pang, Vijay Vasudevan, et al.
474
+ Searching for mobilenetv3.
475
+ In Proceedings of the
476
+ IEEE/CVF international conference on computer vi-
477
+ sion, pages 1314–1324, 2019.
478
+ [11] Ramprasaath R Selvaraju, Michael Cogswell, Abhishek
479
+ Das, Ramakrishna Vedantam, Devi Parikh, and Dhruv
480
+ Batra. Grad-cam: Visual explanations from deep net-
481
+ works via gradient-based localization. In Proceedings
482
+ of the IEEE international conference on computer vi-
483
+ sion, pages 618–626, 2017.
484
+ [12] Scott M Lundberg and Su-In Lee. A unified approach
485
+ to interpreting model predictions. Advances in neural
486
+ information processing systems, 30, 2017.
487
+ [13] Jie Hu, Li Shen, and Gang Sun. Squeeze-and-excitation
488
+ networks. In Proceedings of the IEEE conference on
489
+ computer vision and pattern recognition, pages 7132–
490
+ 7141, 2018.
491
+
4tE0T4oBgHgl3EQfegAX/content/tmp_files/load_file.txt ADDED
@@ -0,0 +1,281 @@
1
+ filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf,len=280
2
+ page_content='arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
3
+ page_content='02390v1 [eess.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
4
+ page_content='IV] 6 Jan 2023 Deep-learning models in medical image analysis: Detection of esophagitis from the Kvasir Dataset Kyoka Yoshioka1†, Kensuke Tanioka2, Satoru Hiwa2 and Tomoyuki Hiroyasu2 1Graduate School of Life and Medical Sciences, Doshisha University, Kyoto, Japan 2Department of Biomedical Sciences and Informatics, Doshisha University, Kyoto, Japan (Tel: +81-774-65-6020;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
5
+ page_content=' E-mail: tomo@is.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
6
+ page_content='doshisha.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
7
+ page_content='ac.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
8
+ page_content='jp) Abstract: Early detection of esophagitis is important because this condition can progress to cancer if left untreated.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
9
+ page_content=' However, the accuracies of different deep learning models in detecting esophagitis have yet to be compared.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
10
+ page_content=' Thus, this study aimed to compare the accuracies of convolutional neural network models (GoogLeNet, ResNet-50, MobileNet V2, and MobileNet V3) in detecting esophagitis from the open Kvasir dataset of endoscopic images.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
11
+ page_content=' Results showed that among the models, GoogLeNet achieved the highest F1-scores.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
12
+ page_content=' Based on the average of true positive rate, MobileNet V3 predicted esophagitis more confidently than the other models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
13
+ page_content=' The results obtained using the models were also compared with those obtained using SHapley Additive exPlanations and Gradient-weighted Class Activation Mapping.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
14
+ page_content=' Keywords: Kvasir dataset, Deep Learning, Convolutional Neural Networks, Gradient-Weighted Class Activation Mapping, SHAP, SHapley Additive exPlanation 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
15
+ page_content=' INTRODUCTION With the development of artificial intelligence (AI), sev- eral studies have focused on the application of this technol- ogy in the medical field.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
16
+ page_content=' In gastroenterology, AI is used to detect inflammation, polyps, and stomach cancer and de- velop systems that can automatically determine the severity of symptoms [1] [2] [3] [4].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
17
+ page_content=' AI models are expected to im- prove diagnostic accuracy and reduce medical costs by pre- venting misdiagnosis by humans.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
18
+ page_content=' Various deep learning and AI models, including deep learning convolutional neural network (CNN) models, have been proposed and used for medical image recognition and analysis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
19
+ page_content=' However, these models differ in accuracy, and com- paring this aspect is important to identify which model is suitable for a specific application in endoscopic imaging.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
20
+ page_content=' The z-line is an anatomic landmark located posterior to the stomach and esophagus.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
21
+ page_content=' Esophagitis is an inflammation of the esophagus that appears as a break in the esophageal mucosa relative to the z-line [5].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
22
+ page_content=' The z-line and esophagitis can be described as normal and diseased conditions, respec- tively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
23
+ page_content=' Early detection of esophagitis is necessary because this condition can cause complications (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
24
+ page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
25
+ page_content=', esophageal ul- cer, bleeding, and stricture) and progress to cancer if left untreated.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
26
+ page_content=' Therefore, distinguishing between the z-line and esophagitis is necessary.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
27
+ page_content=' However, this procedure is difficult [6].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
28
+ page_content=' In addition, the accuracies of various models in detecting esophagitis have yet to be compared.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
29
+ page_content=' Thus, this study aimed to compare the accuracies of sev- eral CNN models, including GoogLeNet [7], ResNet-50 [8], MobileNet V2 [9], and MobileNet V3 [10], in identifying z-lines and esophagitis in endoscopic images from the open Kvasir dataset.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
30
+ page_content=' These models have received considerable at- tention in recent years after winning in the ImageNet Large Scale Visual Recognition Challenge (ILSVRC), a competi- tion using a large image recognition dataset.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
31
+ page_content=' The results ob- † Kyoka Yoshioka is the presenter of this paper.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
32
+ page_content=' tained by the four CNN models were compared.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
33
+ page_content=' The training models were also compared with the explainable artificial in- telligence (XAI) methods Gradient-weighted Class Activa- tion Mapping (Grad-CAM) [11] and SHapley Additive ex- Planations (SHAP) [12].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
34
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
35
+ page_content=' DEEP LEARNING IN MEDICAL IMAGE ANALYSIS 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
36
+ page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
37
+ page_content=' Typical architecture for image classification CNN is a deep learning method specialized for image recognition.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
38
+ page_content=' It is widely used for identifying lesion sites in medical images.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
39
+ page_content=' It combines a convolutional layer with a pooling layer and finally iterates through all the combined layers to generate the results.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
40
+ page_content=' In this study, we compared the results of different CNN models used for site identifi- cation in medical images.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
41
+ page_content=' The CNN models used included GoogleNet and ResNet, the successive winning models of ILSVRC, and MobileNet V2 and MobileNet V3, which have attracted considerable attention in recent years because of their small computational and memory.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
42
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
43
+ page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
44
+ page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
45
+ page_content=' GoogLeNet GoogLeNet was the winning model at ILSVRC in 2014 The model consists of an Inception module, 1×1 convolu- tion, auxiliary loss, and global average pooling.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
46
+ page_content=' GoogLeNet can be multi-layered using the Inception module, but 1×1 convolution is performed before each convolution calcula- tion to reduce dimensionality resulting from the large num- ber of parameters.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
47
+ page_content=' The Inception module helps process data using multiple filters in parallel.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
48
+ page_content=' The fully connected layer is removed to increase the width and depth of the network, average pooling is used instead of the fully connected layer to avoid gradient loss, and class classification is performed on sub-networks branched from the middle of the network by auxiliary loss [7].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
49
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
50
+ page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
51
+ page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
52
+ page_content=' ResNet ResNet was the winning model at the ILSVRC in 2015.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
53
+ page_content=' The problem of learning not progressing due to gradient loss and degradation problems was solved using a method called Residual Block, which uses 152 very deep layers to solve the problem.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
54
+ page_content=' The key features of this model are residual block and batch normalization using shortcut connection.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
55
+ page_content=' ResNet has several models with different layer depths.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
56
+ page_content=' ResNet-50 shows higher accuracy than GoogLeNet in ImageNet clas- sification [8].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
57
+ page_content=' However, ResNet-50 requires about twice as many parameters as GoogLeNet.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
58
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
59
+ page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
60
+ page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
61
+ page_content=' MobileNet V2 MobileNet is a small computationally and memory model that can adjust the trade-off between accuracy and compu- tational load.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
62
+ page_content=' Depthwise separable convolution decomposes the convolution layer into depthwise and pointwise convolu- tion for computation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
63
+ page_content=' This mechanism reduces the compu- tation cost.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
64
+ page_content=' Furthermore, V2 introduces expand/projection layers and inverted residual blocks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
65
+ page_content=' Expand/projection lay- ers rapidly increase or decrease the number of channels.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
66
+ page_content=' Mo- bileNet V2 achieves comparable accuracy to GoogLeNet and ResNet-50 in ImageNet classification while significantly re- ducing the number of parameters [9].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
67
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
68
+ page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
69
+ page_content='4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
70
+ page_content=' MobileNet V3 MobileNet V3 is an improved version of MobileNet V2, introducing a squeeze-and-excite structure (SE-block) in the inverted residual block, one of the features of MobileNet V2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
71
+ page_content=' SE-block improves the expressiveness of the model by weighting information in the channel direction [13].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
72
+ page_content=' Com- pared with V2, MobileNet V3 shows more accurate Im- ageNet classification while shortening total inference time [10].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
73
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
74
+ page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
75
+ page_content=' Explainable AI (XAI) The CNN models were compared with XAI methods Grad-CAM and SHAP.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
76
+ page_content=' The Discussion section explains the results obtained using these techniques.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
77
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
78
+ page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
79
+ page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
80
+ page_content=' Grad CAM Grad-CAM displays a color map of the area the CNN is gazing at for classification [11].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
81
+ page_content=' It is based on the fact that variables with large gradients in the output values of the pre- dicted class are essential for classification prediction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
82
+ page_content=' The gradient of each input image pixel with respect to the output value of the prediction class in the last convolution layer is used.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
83
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
84
+ page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
85
+ page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
86
+ page_content=' SHAP SHAP calculates, for each predicted value, how each char- acteristic variable affects that prediction [12].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
87
+ page_content=' This analysis allows us to visualize the impact of an increase or decrease in the value of a given characteristic variable.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
88
+ page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
89
+ page_content=' MATERIALS AND METHODS CNN models GoogLeNet, ResNet-50, MobileNet V2, and MobileNet V3 were employed to detect esophagitis from the open Kvasir dataset of endoscopic images, and their results were compared.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
90
+ page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
91
+ page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
92
+ page_content=' Kvasir dataset The Kvasir dataset is a collection of endoscopic images of the gastrointestinal tract.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
93
+ page_content=' It was annotated and validated by certified endoscopists.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
94
+ page_content=' The dataset was made available in the fall of 2017 through the Medical Multimedia Challenge pro- vided by MediaEval.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
95
+ page_content=' It includes anatomical landmarks (py- lorus, z-line, and cecum), disease states (esophagitis, ulcera- tive colitis, and polyps), and medical procedures (dyed lifted polyps and dyed resection margins).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
96
+ page_content=' The resolution of the images from the Kvasir dataset with these eight classes varies from 720×576 pixels to 1920×1072 pixels.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
97
+ page_content=' Each image has a different shooting angle, resolution, brightness, magnifica- tion, and center point.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
98
+ page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
99
+ page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
100
+ page_content=' Prepossessing Image prepossessing was performed before training the models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
101
+ page_content=' Edge artifacts and annotations that interfere with learning during the analysis of medical images were re- moved.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
102
+ page_content=' A mask image was created, where pixels with lu- minance values below a certain threshold were set to 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
103
+ page_content=' The opening process was applied to the mask image to remove the annotations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
104
+ page_content=' The image was cropped using this final mask image to obtain the target area.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
105
+ page_content=' This process was performed on all data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
106
+ page_content=' Each image in the dataset has a different resolution.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
107
+ page_content=' All images were resized to 224×224 pixels by bilinear comple- tion and optimized for deep learning input.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
108
+ page_content=' In addition to these processes, data augmentation was performed on the data used for learning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
109
+ page_content=' We applied two types of data aug- mentation: horizontal and vertical flip.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
110
+ page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
111
+ page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
112
+ page_content=' Cross Validation A total of 1000 image data sets containing z-lines and esophagitis were partitioned into test, training, and validation data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
113
+ page_content=' First, 25% (n = 250) of the total data were randomly se- lected to generate test data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
114
+ page_content=' Of the remaining data (75%, n = 750), 50% (n = 500) was used for training and 25% (n = 250) for validation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
115
+ page_content=' The inner loop consisted of training and validation data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
116
+ page_content=' The model was trained using the training data, and parame- ters such as the optimal number of epochs were determined using the validation data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
117
+ page_content=' Thus, four training models were generated.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
118
+ page_content=' The test data of each model were evaluated, and the average of discrimination accuracy of the four times was used as the evaluation value of the CNN model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
119
+ page_content=' The test, training, and validation data were each partitioned to main- tain the class proportions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
120
+ page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
121
+ page_content='4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
122
+ page_content=' CNN models PyTorch was used for the implementation of GoogLeNet, ResNet-50, MobileNet V2, and MobileNet V3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
123
+ page_content=' The ini- tial values of all model parameters were pre-trained by Ima- geNet, and the models were trained by fine tuning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
124
+ page_content=' For all models, the Adam optimizer was used for training.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
125
+ page_content=' The batch size was five, and the maximum number of epochs was 100.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
126
+ page_content=' The cross-entropy error shown in equation (1) was used as the loss function.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
127
+ page_content=' E(x) = − N � n=1 K � k=1 dnk log yk(xn;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
128
+ page_content=' w) (1) 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
129
+ page_content='5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
130
+ page_content=' Evaluation Function Five evaluation indices were used in this experiment: ac- curacy, precision, recall, specificity, and F1-score.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
131
+ page_content=' These metrics were calculated using the confusion matrix shown in Table 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
132
+ page_content=' Table 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
133
+ page_content=' Confusion matrix for a two-class problem Predicted Class (Positive Class) Predicted Class (Negative Class) Actual Class (Positive Class) True Positive False Negative Actual Class (Negative Class) False Positive True Negative In this experiment, the z-line and esophagitis were judged as the negative and positive classes, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
134
+ page_content=' In other words, data judged to be esophagitis and z-line by the learn- ing model were designated true positive (TP) and false neg- ative (FN), respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
135
+ page_content=' Meanwhile, data determined to be esophagitis and z-line by the training model were des- ignated false positive (FP) and true negative (TN), respec- tively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
136
+ page_content=' Based on the values of TP, FP, TN, and FN obtained from the confusion matrix, the accuracy, precision, recall, specificity, and F1-score of the models were calculated using Equations(2) to (6).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
137
+ page_content=' Accuracy = T P + T N T P + FP + FN + T N (2) Precision = T P T P + FP (3) Recall = T P T P + FN (4) Specificity = T N T N + FP (5) F1 score = 2T N 2T P + FP + FN (6) 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
138
+ page_content=' RESULTS AND DISCUSSIONS 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
139
+ page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
140
+ page_content=' Performance comparison between different archi- tecture The evaluation indices obtained from the experiments are shown in Table 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
141
+ page_content=' The F1-score results in Table 2 show that GoogLeNet was the best among the four models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
142
+ page_content=' In other words, GoogLeNet was more reliable in predicting esophagitis than the other models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
143
+ page_content=' Meanwhile, MobileNet V3 showed the highest pre- cision and specificity.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
144
+ page_content=' In other words, MobileNet V3 was the most accurate among the tested models for z-line predic- tion.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
145
+ page_content=' From a medical point of view, an ideal model should be Table 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
146
+ page_content=' Performance comparison between different architecture Model ACC PREC REC SPEC F1 GoogLeNet 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
147
+ page_content='846 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
148
+ page_content='859 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
149
+ page_content='830 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
150
+ page_content='862 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
151
+ page_content='843 MobileNet V3 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
152
+ page_content='842 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
153
+ page_content='901 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
154
+ page_content='776 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
155
+ page_content='908 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
156
+ page_content='831 ResNet-50 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
157
+ page_content='833 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
158
+ page_content='865 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
159
+ page_content='792 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
160
+ page_content='874 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
161
+ page_content='826 MobileNet V2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
162
+ page_content='830 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
163
+ page_content='852 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
164
+ page_content='800 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
165
+ page_content='860 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
166
+ page_content='825 likely to distinguish esophagitis with severe symptoms from the z-line.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
167
+ page_content=' The average of TP rate were 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
168
+ page_content='950, 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
169
+ page_content='923, 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
170
+ page_content='892, and 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
171
+ page_content='841 for MobileNet V3, MobileNet V2, GoogLeNet, and ResNet-50, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
172
+ page_content=' MobileNet V3 predicted esophagi- tis with more confidence than the other models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
173
+ page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
174
+ page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
175
+ page_content=' GoogLeNet analysis Grad-CAM and SHAP were applied to the learned model, and what kind of the model was created was discussed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
176
+ page_content=' Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
177
+ page_content='1 shows an example of the image results in the case of TP predicted by GoogLeNet.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
178
+ page_content=' In the Grad-CAM results, red indicates the most potent activation, and blue indicates the weakest activation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
179
+ page_content=' In the SHAP results, the SHAP values of the patches were computed and rendered in a color map: a positive SHHAP value (red) indicates that the class is sup- ported.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
180
+ page_content=' By contrast, a negative SHAP value (blue) indicates that the class is rejected.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
181
+ page_content=' Tearing the esophageal mucosa against the z-line is a feature of esophagitis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
182
+ page_content=' According to Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
183
+ page_content='1, the results of Grad-CAM and SHAP showed that the learned model of GoogLeNet can makes predictions focusing on the clinically significant aspects of esophagitis images.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
184
+ page_content=' The GoogLeNet model learned the findings that are important for diagnosing esophagitis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
185
+ page_content=' Comparison results showed that SHAP captured the location of multiple mucosal tears in the image more ac- curately than Grad-CAM.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
186
+ page_content=' Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
187
+ page_content='2 shows the results of applying Grad-CAM and SHAP in the FN case.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
188
+ page_content=' The following can be observed from the re- sults of Grad-CAM and SHAP for Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
189
+ page_content='2, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
190
+ page_content=' In the Grad-CAM results, most areas in the image are shown as activated regions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
191
+ page_content=' Areas that provide the basis for the pre- diction are difficult to identify because of the gradient satu- ration in the Grad-CAM calculation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
192
+ page_content=' In the SHAP results, the inflammatory areas of the input image are indicated by blue pixels.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
193
+ page_content=' Blue pixels indicate features that have a negative contribution to the prediction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
194
+ page_content=' In other words, although the model incorrectly identified esophagitis as a z-line, the model recognized that areas in the image negatively contributed to the z-line decision.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
195
+ page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
196
+ page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
197
+ page_content=' MobileNet V3 analysis One hundred images were determined to be TP in the MobileNet V3 model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
198
+ page_content=' The SHAP results for the images judged to have the highest and lowest probabilities of being esophagitis are shown in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
199
+ page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
200
+ page_content=' As shown in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
201
+ page_content='3, in cases with a high prediction proba- bility, some features may have a negative contribution to the (a) Raw image (b) Grad-CAM (c) SHAP Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
202
+ page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
203
+ page_content=' True Positive Pattern (a) Raw image (b) Grad-CAM (c) SHAP Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
204
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
205
+ page_content=' False Negative Pattern Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
206
+ page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
207
+ page_content=' First image predicted positive with 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
208
+ page_content='000 probability, and second image predicted positive with 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
209
+ page_content='524 probability.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
210
+ page_content=' prediction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
211
+ page_content=' Many features showing negative contributions can be identified in the images with low prediction proba- bility for Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
212
+ page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
213
+ page_content=' In this case, the prediction probability may be low.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
214
+ page_content=' 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
215
+ page_content=' CONCLUSIONS We compared the accuracies of CNN models, including GoogLeNet, ResNet-50, MobileNet V2, and MobileNet V3, in identifying z-line and esophagitis in endoscopic images from the open Kvasir dataset.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
216
+ page_content=' Among the four models, GoogLeNet had the highest F1-score, and MobileNet V3 had the highest average TP rate.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
217
+ page_content=' These results suggest that GoogLeNet performs better than state-of-the-art CNN mod- els in medical image recognition.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
218
+ page_content=' In addition, MobileNet V3 is a cost-effective model because of its low memory and short training time.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
219
+ page_content=' Each model was analyzed and compared with Grad-CAM and SHAP.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
220
+ page_content=' Other models, datasets, and model analyses are warranted for verification.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
221
+ page_content=' REFERENCES [1] Peng-Jen Chen, Meng-Chiung Lin, Mei-Ju Lai, Jung- Chun Lin, Henry Horng-Shing Lu, and Vincent S Tseng.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
222
+ page_content=' Accurate classification of diminutive colorectal polyps using computer-aided analysis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
223
+ page_content=' Gastroenterol- ogy, 154(3):568–575, 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
224
+ page_content=' [2] Toshiaki Hirasawa, Kazuharu Aoyama, Tetsuya Tan- imoto, Soichiro Ishihara, Satoki Shichijo, Tsuyoshi Ozawa, Tatsuya Ohnishi, Mitsuhiro Fujishiro, Keigo Matsuo, Junko Fujisaki, et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
225
+ page_content=' Application of artificial intelligence using a convolutional neural network for detecting gastric cancer in endoscopic images.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
226
+ page_content=' Gastric Cancer, 21(4):653–660, 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
227
249
+ page_content='[3] Pedro Guimarães, Andreas Keller, Tobias Fehlmann, Frank Lammert, and Markus Casper.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
250
+ page_content=' Deep-learning based detection of gastric precancerous conditions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
251
+ page_content=' Gut, 69(1):4–6, 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
252
+ page_content=' [4] Yaqiong Zhang, Fengxia Li, Fuqiang Yuan, Kai Zhang, Lijuan Huo, Zichen Dong, Yiming Lang, Yapeng Zhang, Meihong Wang, Zenghui Gao, et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
253
+ page_content=' Diagnosing chronic atrophic gastritis by gastroscopy using artificial intelligence.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
254
+ page_content=' Digestive and Liver Disease, 52(5):566– 572, 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
255
+ page_content=' [5] Konstantin Pogorelov, Kristin Ranheim Randel, Carsten Griwodz, Sigrun Losada Eskeland, Thomas de Lange, Dag Johansen, Concetto Spampinato, Duc-Tien Dang-Nguyen, Mathias Lux, Peter Thelin Schmidt, et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
256
+ page_content=' Kvasir: A multi-class image dataset for computer aided gastrointestinal disease detection.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
257
+ page_content=' In Proceedings of the 8th ACM on Multimedia Systems Conference, pages 164–169, 2017.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
258
+ page_content=' [6] Timothy Cogan, Maribeth Cogan, and Lakshman Tamil.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
259
+ page_content=' Mapgi: accurate identification of anatomi- cal landmarks and diseased tissue in gastrointestinal tract using deep learning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
260
+ page_content=' Computers in biology and medicine, 111:103351, 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
261
+ page_content=' [7] Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Ser- manet, Scott Reed, Dragomir Anguelov, Dumitru Er- han, Vincent Vanhoucke, and Andrew Rabinovich.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
262
+ page_content=' Go- ing deeper with convolutions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
263
+ page_content=' In Proceedings of the IEEE conference on computer vision and pattern recog- nition, pages 1–9, 2015.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
264
+ page_content=' [8] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
265
+ page_content=' Deep residual learning for image recognition.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
266
+ page_content=' In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770–778, 2016.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
267
+ page_content=' [9] Mark Sandler, Andrew Howard, Menglong Zhu, An- drey Zhmoginov, and Liang-Chieh Chen.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
268
+ page_content=' Mobilenetv2: Inverted residuals and linear bottlenecks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
269
+ page_content=' In Proceed- ings of the IEEE conference on computer vision and pattern recognition, pages 4510–4520, 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
270
+ page_content=' [10] Andrew Howard, Mark Sandler, Grace Chu, Liang- Chieh Chen, Bo Chen, Mingxing Tan, Weijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
271
+ page_content=' Searching for mobilenetv3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
272
+ page_content=' In Proceedings of the IEEE/CVF international conference on computer vi- sion, pages 1314–1324, 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
273
+ page_content=' [11] Ramprasaath R Selvaraju, Michael Cogswell, Abhishek Das, Ramakrishna Vedantam, Devi Parikh, and Dhruv Batra.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
274
+ page_content=' Grad-cam: Visual explanations from deep net- works via gradient-based localization.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
275
+ page_content=' In Proceedings of the IEEE international conference on computer vi- sion, pages 618–626, 2017.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
276
+ page_content=' [12] Scott M Lundberg and Su-In Lee.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
277
+ page_content=' A unified approach to interpreting model predictions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
278
+ page_content=' Advances in neural information processing systems, 30, 2017.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
279
+ page_content=' [13] Jie Hu, Li Shen, and Gang Sun.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
280
+ page_content=' Squeeze-and-excitation networks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
281
+ page_content=' In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 7132– 7141, 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE0T4oBgHgl3EQfegAX/content/2301.02390v1.pdf'}
6tAzT4oBgHgl3EQfEvoZ/content/tmp_files/2301.00997v1.pdf.txt ADDED
@@ -0,0 +1,848 @@
1
+ Detector and physics simulation using heavy ion collisions at
2
+ NICA-SPD
3
+ I. Denisenko1,a) and R. Pandey2,b)
4
+ 1Joint Institute for Nuclear Research, Joliot-Curie 6, Dubna-141980,
5
+ Moscow Region, Russia.
6
+ 2Graduate Engineer Trainee, Larsen & Toubro Limited, Faridabad,
7
+ Haryana, India.
8
+ a)iden@jinr.ru
9
+ b)rishav160999@gmail.com
10
+ Abstract
11
+ The space-time picture of hadron formation in high-energy collisions with nuclear
12
+ targets is still poorly known.
13
+ Tests of hadron formation were suggested for the
14
+ first stage of SPD running.
15
+ They will require measuring charged pion and proton
16
+ spectra with a precision better than 10%. A study has been carried out to check the
17
+ feasibility of such studies at SPD. In this work, 12C − 12C and 40Ca − 40Ca heavy ion
18
+ collisions at a center-of-mass energy of 11 GeV/nucleon were simulated using the SMASH
19
+ event generator. Firstly, the generator-level events were studied. The distribution of
20
+ track multiplicities and momentum distributions of different types of charged particles
21
+ were obtained. Secondly, the generated events passed through the full reconstruction
22
+ using the SpdRoot framework.
23
+ At this stage particles were identified using dE/dx
24
+ measurement and time-of-flight information. This allowed us to estimate charged-track
25
+ multiplicities in the tracking system and the purities of charged-particle spectra. The results
26
+ on multiplicity are important to estimate occupancies in the tracking system, while the
27
+ results on the pion and proton momentum spectra show that particle identification
28
+ should be acceptable for validation of hadron formation models. This is the first study
29
+ of moderate ion collisions for the SPD Collaboration.
30
+ Keywords:
31
+ Hadron formation effects, Heavy ion collision, SMASH, NICA-SPD, Rapidity,
32
+ Charged track multiplicity, Particle physics event generator.
33
+ 1
34
+ arXiv:2301.00997v1 [physics.ins-det] 3 Jan 2023
35
+
36
+ 1
37
+ INTRODUCTION
38
+ The SPD detector is primarily optimized to study spin dependent gluon structure of proton and
39
+ deuteron using open charm production, charmonia production and prompt photons. At the same
40
+ time, its physics program includes studies of various aspects of QCD. This work is devoted to studies
41
+ of hadron formation in nuclear collisions proposed in Ref. [1].
42
+ Hadrons produced in hadron collisions emerge in the form of prehadrons, which interact with
43
+ nucleons with reduced strength. This suppression is poorly known and is described in model de-
44
+ pendent way. This suppression results in different spectra of final particles as is illustrated in Fig.1
45
+ for rapidity distributions (in a similar way it affects the pT spectrum). Naturally, these spectra can
46
+ be used to study hadron formation effects. The required precision of such measurements is 10%.
47
+ The aim of this work is to evaluate feasibility of such measurements with MC simulation. Here,
48
+ ion collisions of 12C − 12C and 40Ca − 40Ca at √s = 11AGeV were generated using the SMASH
49
+ (Simulating Many Accelerated Strongly-interacting Hadrons) event generator. Afterwards, the
50
+ full simulation and reconstruction was performed using the SpdRoot framework.
51
+ Figure 1: Rapidity spectra of protons and charged pions in 12C − 12C and 40Ca − 40Ca collisions.
52
+ 2
53
+ [Fig. 1 panels: dσ/dy (mb) versus rapidity y for protons and for charged pions in 12C + 12C and 40Ca + 40Ca collisions at √sNN = 11 GeV; curves compare "w/o formation", "default" and "QDM", with pcut = 2 GeV/c and pcut = 1 GeV/c.]
125
+ 2
126
+ NICA FACILITY
127
+ The NICA (Nuclotron based Ion Collider fAcility) collider at Joint Institute for Nuclear Research
128
+ in Dubna is being built to provide beams for two experiments. The first experiment, MPD (Multi
129
+ Purpose Detector), will study properties of dense baryonic matter (matter present at extreme high
130
+ density in the QCD phase diagram), like the Quark Gluon Plasma. The second experiment, SPD (Spin
131
+ Physics Detector), is devoted to the study of spin-related phenomena and QCD. Once the NICA collider
132
+ is operational, scientists will be able to create a special state of matter in the laboratory which
133
+ existed for a very short interval of time (~20 µs) just after the Big Bang. This special state is called
134
+ QGP (Quark Gluon Plasma), and it filled the entire universe shortly after the Big Bang.
135
+ The main parts of the NICA facility consist of two independent injector complexes (an injector for light
136
+ ions, and an injector for heavy ions, KRION 6T), the Light Ion Linear Accelerator (LU20) for accelerating
137
+ light ions like protons (H+), deuterons, and α-particles up to a kinetic energy of 5 MeV, then the Heavy Ion Linear
138
+ Accelerator (HILAC) to accelerate heavy ions up to Au to a maximum kinetic energy of 3.2 MeV/n, then a
139
+ Super Conducting (SC) Booster Synchrotron to create ultra high vacuum and to provide complete
140
+ stripping of heavy ions, then a SC Heavy Ion Synchrotron Nuclotron to accelerate both light and
141
+ heavy ions to required beam energy. The accelerated beams will collide at two different locations
142
+ where MPD detector and SPD detector are being built. The schematic view of NICA complex is
143
+ shown in Fig.2.
144
+ Figure 2: Schematic view of NICA complex.
145
+ 3
146
+ SPD DETECTOR
147
+ The Spin Physics Detector [2,3] is a 4π universal detector optimized to study spin-related phenomena
148
+ via open charm, charmonia and prompt photons in the collisions of polarized p-p or d-d beams
149
+ with √sNN up to 27 GeV. However, at the first stage of NICA-SPD, the expected collision energy
150
+ will be from 3.4 up to 10 GeV, and later on, after the first upgrade, it is expected to reach up to 27
151
+ GeV. The general layout depicting isometric projection of SPD setup is shown in Fig.3. The main
152
+ parts involved in advanced tracking and particle identification capabilities have been shown. (i)
153
+ The beam pipe passes through the center of the detector and carries the accelerated beams of ions. (ii)
154
+ The MicroMegas detector improves the momentum resolution and tracking efficiency of the
155
+ tracking system. (iii) The Straw Tracker (ST) detector is for the reconstruction of the primary and
156
166
+ secondary particle tracks and for determination of their momenta. (iv) The Time Of Flight (TOF)
167
+ detector is a part of the Particle Identification (PID) system and is used for identification of particles
168
+ like π, k, and p with long trajectories. (v) The magnet system shown by red color provides 1T
169
+ of magnetic field along the beam axis. This setup is limited to first stage of SPD operation, and
170
+ will be considered only for the identification of stable charged particles. Neutral particles, like n0,
171
+ photons will be detected at later stages. The main parts of SPD first stage have been explained in
172
+ detail below. There is a possibility to have TOF system for the first stage studies.
173
+ Figure 3: Layout of the SPD setup proposed for first stage at NICA-SPD.
174
+ 3.1
175
+ CENTRAL TRACKER
176
+ The innermost detector of SPD consists of a MicroMegas-based Central Tracker (MCT). Its purpose
177
+ is to identify the primary vertex coordinate and to improve momentum resolution and tracking
178
+ efficiency.
179
+ It is based on MicroMegas (Micro Mesh Gaseous Structure) technology and detects
180
+ charged particle by amplifying the charges produced due to ionization of the gas molecules present
181
+ in detector volume. When an ionizing particle track passes through detector volume, it ionizes the
182
+ gas molecules and creates a few hundred e−-ion pairs. Electrons are accelerated opposite to the
183
+ direction of applied electric field of 600 V/cm in ionization gap, while ions are attracted towards
184
+ the cathode. When the e− crosses the micromesh, it encounters an intense electric field (> 30 kV/cm) and gains
185
+ enough energy to ionize other gas molecules in its path. During this process an avalanche of e−-ion
186
+ pairs is produced (one e− produces ~10^4 e−-ion pairs), which is sufficient to create an electronic signal
187
+ which is read out by readout electrodes.
188
+ 3.2
189
+ STRAW TRACKER
190
+ ST is mainly for the reconstruction of primary and secondary particle tracks and measuring their
191
+ momenta, but it also participates in the identification of π, K, and p via energy deposit (dE/dx)
192
+ measurements. It consists of two major parts - barrel (covers radius from 270 to 850 mm) and two
193
+ end-caps. The barrel is divided into 8 modules enclosed in a carbon fiber capsule. Each module has
194
+ 30 double layers of straw tubes (1 cm diameter) which run parallel (long straw tubes) and perpendicular
195
207
+ (short straw tubes) to the beam axis and contains 1500 and 6000 parallel and perpendicular straw
208
+ tubes respectively. Straw tubes are made of polyethylene terephthalate and outer surface is coated
209
+ with very thin layer of Cu and Au. Carbon capsule is meant to protect the outer surface of these
210
+ tubes from humidity. One side and two opposite ends of capsule are provided with small holes
211
+ where end plugs are fixed. FEE are connected to these end plugs to read the detector signal. Any
212
+ particle which passes through the long straws will send detector signal to both opposite ends while a
213
+ particle passing through short straw will send detector signal to any one side of capsule where FEE
214
+ is attached. Thus, long straws will be read from two opposite ends while short straws will be read
215
+ from one side. The end-caps of ST are divided into 3 modules and each module has 4 hexadecimal
216
+ cameras (U, V, X, Y) to record the four coordinates of any physical quantity like four-momentum.
217
+ The FEE to be used can be similar to the one used at NA64 experiment (for the search of dark
218
+ matter), or DUNE experiment (to detect and study properties of neutrino).
219
+ 3.3
220
+ TIME OF FLIGHT DETECTOR
221
+ The TOF detector is part of the PID system. Similar to the ST, the TOF provides identification of π, K, and
222
+ p by measuring their flight time. The energy loss data registered by ST can be used together with the
223
+ data from TOF for correct identification of particle tracks. The TOF distinguishes charged particles
224
+ (mainly π and K) in the momentum range up to 1.5 GeV. The major parts of the TOF comprise a
225
+ barrel and two end-caps. For the first stage of NICA-SPD, two different designs of the TOF have been
226
+ suggested. The first is a TOF based on multigap timing Resistive Plate Chambers (mRPC), which
227
+ will consist of 220 rectangular plate chambers (160 for the barrel and 30 for each end-cap). The second
228
+ one is based on Plastic Scintillator Tiles and will comprise 10.1K small scintillator tiles (7.4K for
229
+ the barrel and 1.4K for each end-cap). A scintillator has the property of emitting light in the visible region
230
+ when an ionizing radiation passes through it. So, in this design when a particle passes through
231
+ TOF, scintillated photons are produced which are detected by four Si Photo Multipliers (SiPMs)
232
+ present at each sensor board attached at two extreme ends of scintillator tile.
233
+ 4
234
+ EVENT GENERATION
235
+ 12C −12C and 40Ca−40Ca heavy ion collisions at √s = 11 AGeV with maximum impact parameter
236
+ set to 8 fm for C-C and 11 fm for Ca-Ca were simulated using SMASH. The Fermi motion was
237
+ assumed to be “frozen” and 100K events were generated for each heavy ion collision. The SMASH
238
+ input file for C-C collision is shown below.
239
+ *********** SMASH INPUT ************
240
+ config.yaml file for C-C collision:
+ Logging:
+     default: INFO
+ General:
+     Modus: Collider
+     Time_Step_Mode: Fixed
+     Delta_Time: 0.1
+     End_Time: 200.0
+     Randomseed: -1
+     Nevents: 100000
+ Output:
+     Output_Interval: 10.0
+     Particles:
+         Format: ["Oscar2013"]
+ Modi:
+     Collider:
+         Projectile:
+             Particles: {2212: 6, 2112: 6} #C-12
+         Target:
+             Particles: {2212: 6, 2112: 6} #C-12
+         Sqrtsnn: 11.0
+         Impact:
+             Sample: "quadratic"
+             Range: [0.0, 8.0]
+         Fermi_Motion: "frozen"
273
+ ************************************
274
+ Multiplicity of generated charged particles for C − C and Ca − Ca collisions are shown in
275
+ Fig. 4. The peaks at 12 for 12C+12C collisions and at 40 for 40Ca+40Ca collisions correspond to
276
+ events where no interaction occurred. The rapidity distributions are shown in Fig. 5. The spectra
277
+ obtained from SMASH output show qualitative agreement with the ones in Fig. 1. Peaks for protons
278
+ correspond to particles moving close to the initial beam direction. Moreover, fractions of different
279
+ particle types can be estimated. It can be seen that for |y| < 2 (i.e. within the acceptance of the
280
+ detector) charged particles are dominated by pions. Apart from p±, π±, & K±, marginal numbers
281
+ of sigmas, cascades, and omegas were also generated. The PID efficiency depends on the particle
282
+ momentum.
283
+ The momentum spectra for protons, pions and kaons are shown in Fig. 6 in the
284
+ midrapidity region (|y| < 0.5, for which theoretical predictions have been given). Most of the pions
285
+ have momenta below 0.8 GeV and protons below 1 GeV. This means that the types of these particles
286
+ should be well resolved by dE/dx measurements. When studying pion or proton spectra, there is
287
+ high probability of kaon/pion misidentification, but fraction of such events is strongly suppressed
288
+ by small initial kaon numbers.
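A minimal sketch of how the quantities discussed above (charged multiplicity, rapidity y = 0.5*ln((E+pz)/(E-pz)) and total momentum) can be computed from generator output; the particle arrays below are hypothetical stand-ins for one event rather than actual SMASH output.

import numpy as np

# Hypothetical stand-in for the charged particles of one generated event
pdg = np.array([211, -211, 2212, 321, 2212, -211])     # PDG codes
px  = np.array([0.25, 0.18, 0.30, 0.20, 0.40, 0.15])   # GeV/c
py  = np.array([0.10, -0.22, 0.25, 0.30, 0.35, 0.05])
pz  = np.array([0.40, -0.20, 0.90, 0.50, 1.80, 0.10])
mass = {211: 0.1396, 321: 0.4937, 2212: 0.9383}        # GeV/c^2
m = np.array([mass[abs(int(c))] for c in pdg])

p_tot = np.sqrt(px**2 + py**2 + pz**2)                 # total momentum
E     = np.sqrt(p_tot**2 + m**2)                       # energy
y     = 0.5 * np.log((E + pz) / (E - pz))              # rapidity

print("charged multiplicity:", len(pdg))
for name, code in [("pions", 211), ("kaons", 321), ("protons", 2212)]:
    sel = np.abs(pdg) == code
    print(name, "count:", int(sel.sum()), "| |p| at |y|<0.5:", p_tot[sel & (np.abs(y) < 0.5)])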
289
+ 6
290
+
291
+ (a)
292
+ (b)
293
+ Figure 4: Generator-level multiplicity of charged particles for 12C − 12C collision (a) and 40Ca − 40Ca collisions (b).
294
+ (a)
295
+ (b)
296
+ Figure 5: Rapidity distribution of charged particles in 12C − 12C (a) and 40Ca − 40Ca (b) collision.
297
+ [Figs. 4 and 5 panels: number of events versus charged-particle multiplicity, and number of charged particles versus rapidity y for protons, pions and kaons, in C-12 + C-12 and Ca-40 + Ca-40.]
360
+ (a) p distribution of p± in 12C − 12C collision.
361
+ (b) p distribution of p± in 40Ca − 40Ca collision.
362
+ (c) p distribution of π± in 12C − 12C collision.
363
+ (d) p distribution of π± in 40Ca − 40Ca collision.
364
+ (e) p distribution of k± in 12C − 12C collision.
365
+ (f) p distribution of k± in 40Ca − 40Ca collision.
366
+ Figure 6: Total momentum distribution of protons, pions, and kaons at generator level in 12C − 12C and 40Ca− 40Ca collision.
367
486
+ 5
487
+ DETECTOR SIMULATION AND EVENT RECONSTRUCTION
488
+ The detector simulation and reconstruction was performed with the SpdRoot framework. To read
489
+ SMASH-generated events, the SpdRoot code was modified and an additional C++ class was added.
490
+ During the simulation stage the particles were transported through the detector geometrical model
491
+ using Geant4.
492
+ At the reconstruction stage, Geant4 tracks and vertices were reconstructed and
493
+ particle identification with dE/dx and time of flight measurements was performed. For the PID
494
+ three hypotheses were considered: pion, kaon and proton. The reconstructed ionization energy
495
+ losses and “measured” time of flight were used to construct conditional probabilities (e.g. p(t|pid),
496
+ where t is the measured time and pid is a particle type hypothesis). Out of 100K events generated
497
+ by SMASH, the first 1K events were considered for detector simulation due to slow data processing in
498
+ SpdRoot.
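A minimal sketch of the PID idea described above: for a track of momentum p, Gaussian conditional probabilities p(t|pid) are built from the expected time of flight under the pion, kaon and proton mass hypotheses, and the most probable hypothesis is kept. The flight length and time resolution used here are illustrative assumptions, not SPD parameters.

import numpy as np

C = 0.2998                                                   # speed of light [m/ns]
MASS = {"pion": 0.1396, "kaon": 0.4937, "proton": 0.9383}    # GeV/c^2

def expected_tof(p, m, length_m=1.5):
    beta = p / np.sqrt(p**2 + m**2)
    return length_m / (beta * C)                             # time of flight [ns]

def identify(p, t_measured, sigma_t=0.06):
    # Gaussian conditional probabilities p(t | pid), as described in the text
    probs = {pid: np.exp(-0.5 * ((t_measured - expected_tof(p, m)) / sigma_t) ** 2)
             for pid, m in MASS.items()}
    norm = sum(probs.values())
    return max(probs, key=probs.get), {k: v / norm for k, v in probs.items()}

p = 0.8                                   # GeV/c
t = expected_tof(p, MASS["kaon"]) + 0.02  # a smeared "measured" time
best, posterior = identify(p, t)
print("best hypothesis:", best, "| posteriors:", {k: round(v, 3) for k, v in posterior.items()})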
499
+ 6
500
+ ANALYSIS
501
+ A physical analysis was performed using C++ code and the ROOT library, based on the SpdRoot output.
502
+ All tracks reconstructed in the detector with measured momentum were accepted. For the particle
503
+ type, the hypothesis that gives the largest conditional probability is adopted.
504
+ Multiplicity, as well as
505
+ kinematic distributions for pions, kaons and protons are studied. For particle momentum spectra
506
+ there are no notable differences between C − C and Ca − Ca collisions, so only the former
507
+ will be considered.
508
+ 6.1
509
+ CHARGED TRACK MULTIPLICITY
510
+ The SPD detector set-up is optimized for p − p and d − d collisions. Thus knowing charged track
511
+ multiplicities for ion collisions is important to estimate CT and ST occupancies and feasibility of
512
+ such studies. Fig. 7 shows the total multiplicity of charged particles reconstructed by the tracking
513
+ system in 12C − 12C and 40Ca − 40Ca collisions. The numbers of reconstructed tracks are much
514
+ lower compared to generator-level studies. This is because the geometry of the tracking system is such
515
+ that tracks with polar angle θ < 10◦ or θ > 170◦ do not hit the tracker and pass along the beam
516
+ pipe itself, so such tracks are ignored. Also, there were events without nuclei interactions which
517
+ resulted in no track reconstruction. So, to avoid a large peak at zero due to the mentioned reasons, the
518
+ X-axis count starts from 1.
519
+ 6.2
520
+ PION MOMENTUM SPECTRUM (12C − 12C)
521
+ The spectra of particles identified as pions separately by ionization losses and by TOF are shown in
522
+ Fig. 8. The spectra show a resemblance to the generator plot of the pion momentum distri-
523
+ bution. Based on MC-truth information, the background from misidentification of other charged particles
524
+ (K±, p±, e±, & µ±) is studied. The obtained distribution for “pions identified as pions” only slightly
525
+ deviates from distribution of all selected pion candidates. The estimated relative contamination of
526
+ the pion spectra is shown in Fig. 9. It can be seen that a purity above 90% can be obtained up to
527
+ 1.2 GeV using either dE/dx or TOF measurements.
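A minimal sketch of how the purity shown in Fig. 9 can be estimated: in bins of reconstructed momentum, purity is the fraction of pion candidates that are true pions according to MC truth. The arrays are hypothetical stand-ins for the reconstruction output.

import numpy as np

p_reco    = np.array([0.2, 0.4, 0.5, 0.7, 0.9, 1.1, 1.3, 1.5])            # GeV/c, tracks identified as pions
true_pion = np.array([1, 1, 1, 1, 0, 1, 0, 0], dtype=bool)                # MC-truth flag per candidate

bins = np.arange(0.0, 2.5, 0.5)
sel_all,  _ = np.histogram(p_reco, bins=bins)
sel_true, _ = np.histogram(p_reco[true_pion], bins=bins)

with np.errstate(invalid="ignore", divide="ignore"):
    purity = np.where(sel_all > 0, sel_true / sel_all, np.nan)

for lo, hi, pur in zip(bins[:-1], bins[1:], purity):
    print(f"{lo:.1f}-{hi:.1f} GeV/c: purity = {pur}")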
528
+ 9
529
+
530
+ Figure 7: Charged track multiplicity reconstructed in 12C − 12C (left), 40Ca − 40Ca (right) collisions (shown by red) and
531
+ number of particles for which TOF information is available (shown by blue).
532
+ (a) Total momentum distribution of reconstructed charged particles
533
+ identified as π± by ionization losses.
534
+ (b) Total momentum distribution of reconstructed charged particles
535
+ identified as π± by TOF.
536
+ Figure 8: Total momentum distribution of reconstructed π± candidates in 12C − 12C collision (Detector level).
537
+ Figure 9: Purity of the selected pion candidates as a function of their momentum.
538
650
+ 6.3
651
+ KAON MOMENTUM SPECTRUM (12C − 12C)
652
+ The kaon momentum spectrum was not explicitly mentioned among the observables for studying hadron for-
653
+ mation effects in nuclei. Nevertheless, kaon production may be interesting for other reasons. The
654
+ obtained spectra of kaon candidates are shown in Fig. 10, separately for ionization losses and TOF.
655
+ First of all, the shown data lack statistics. Secondly, it can be seen that there is a huge contamina-
656
+ tion from misidentified pions. This is explained by the very small fraction of generated kaons and the
657
+ fact that the probability of selecting a misidentified particle is proportional to their number. The relative
658
+ fraction of correctly identified kaons is shown in Fig. 11.
659
+ (a) Total momentum distribution of reconstructed charged particles
660
+ identified as K± by ionization losses.
661
+ (b) Total momentum distribution of reconstructed charged particles
662
+ identified as K± by TOF.
663
+ Figure 10: Total momentum distribution of reconstructed K± candidates in 12C − 12C collision (Detector level).
664
+ Figure 11: Purity of the selected kaon candidates as a function of their momentum.
665
740
+ 6.4
741
+ PROTON MOMENTUM SPECTRUM (12C − 12C)
742
+ Finally, proton momentum spectra have been considered. In this study protons and antiprotons were
743
+ considered together, but the fraction of produced antiprotons is negligible. The proton candidate
744
+ distributions and the contributions from misidentification are shown in Fig. 12. The purity of the
745
+ selected samples is shown in Fig. 13. It can be seen that dE/dx measurements alone will not allow
746
+ a precise determination of the proton spectrum. Reasonably good results can be expected only in the
747
+ case of combined identification by ionization losses and the TOF system.
748
+ (a) Total momentum distribution of reconstructed charged particles
749
+ identified as p± by ionization losses.
750
+ (b) Total momentum distribution of reconstructed charged particles
751
+ identified as p± by TOF.
752
+ Figure 12: Total momentum distribution of reconstructed p± candidates in 12C − 12C collision (Detector level).
753
+ Figure 13: Purity of the selected proton candidates as a function of their momentum.
754
829
+ 7
830
+ SUMMARY
831
+ The goal of this work was to check the feasibility of hadron formation effects studies at the first
832
+ stage of SPD operation. For this purpose, an analysis of 12C − 12C and 40Ca − 40Ca collisions was
833
+ performed at the generator level and then the full event reconstruction was done at detector level.
834
+ The multiplicity distributions indicate that the occupancies of the tracking detectors should be checked.
835
+ Part of the events with high number of charged tracks may not be fully reconstructed. Particle
836
+ identification with ionization losses and TOF was considered separately (in the future, dE/dx only or
837
+ their combination can be expected). The purity of the measured charged pion distribution for both
838
+ types of ion collisions using dE/dx only is rather good and meets the requirements mentioned above. In
839
+ the case of combining information from ionization losses and the time-of-flight system, the purity of the proton
840
+ distribution may be improved.
841
+ References
842
+ [1] V. V. Abramov, A. Aleshko, V. A. Baskov, E. Boos, V. Bunichev, O. D. Dalkarov, R. El-Kholy,
843
+ A. Galoyan, A. V. Guskov and V. T. Kim, et al. Phys. Part. Nucl. 52 (2021) no.6, 1044-1119
844
+ doi:10.1134/S1063779621060022 [arXiv:2102.08477 [hep-ph]].
845
+ [2] V. M. Abazov et al. [SPD proto], [arXiv:2102.00442 [hep-ex]].
846
+ [3] SPD TDR [unpublished].
847
+ 13
848
+
9NE1T4oBgHgl3EQf7wX0/content/tmp_files/2301.03539v1.pdf.txt ADDED
@@ -0,0 +1,1782 @@
1
+ 1
2
+ Federated Coded Matrix Inversion
3
+ Neophytos Charalambidesµ, Mert Pilanciσ, and Alfred O. Hero IIIµ
4
+ .µEECS Department University of Michigan .σEE Department Stanford University
5
+ Email: neochara@umich.edu, pilanci@stanford.edu, hero@umich.edu
6
+ Abstract
7
+ Federated learning (FL) is a decentralized model for training data distributed across client devices. Coded computing (CC) is
8
+ a method for mitigating straggling workers in a centralized computing network, by using erasure-coding techniques. In this work
9
+ we propose approximating the inverse of a data matrix, where the data is generated by clients; similar to the FL paradigm, while
10
+ also being resilient to stragglers. To do so, we propose a CC method based on gradient coding. We modify this method so that the
11
+ coordinator does not need to have access to the local data, the network we consider is not centralized, and the communications which
12
+ take place are secure against potential eavesdroppers.
13
+ I. INTRODUCTION AND RELATED WORK
14
+ Inverting a matrix is one of the most important operations in numerous applications, such as, signal processing, machine
15
+ learning, and scientific computing [2], [3]. A common way of inverting a matrix is to perform Gaussian elimination, which
16
+ requires O(N 3) operations for square matrices of order N. In high-dimensional applications, this can be cumbersome. Over the
17
+ past few years the machine learning (ML) community has made much progress on federated learning (FL), focusing on iterative
18
+ methods.
19
+ The objective of FL is to leverage computation, communication and storage resources to perform distributed computations for
20
+ ML models, where the data of each federated worker is never shared with the coordinator of the network, which aggregates local
21
+ computations in order to update the model parameters. In FL applications it is important that the data is kept private and secure.
22
+ Distributed computations in the presence of stragglers (workers who fail to compute their task or have longer response time than
23
+ others) must account for the effect of non-responsive workers. Coding-theoretic approaches have been adopted for this purpose
24
+ [4], [5], and fall under the framework of coded computing (CC). Data security is also an increasingly important issue in CC [6].
25
+ Despite the fact that multiplication algorithms imply inversion algorithms and vice versa, in the context of CC; matrix inversion
26
+ has not been studied as extensively as coded matrix multiplication (CMM) [7]. The main reason for this is the fact that the latter
27
+ is non-linear and non-parallelizable as an operator. We point out that distributed inversion algorithms do exist, though these make
28
+ assumptions on the matrix, are specific for distributed and parallel computing platforms, and require a matrix factorization; or
29
+ heavy and multiple communication instances between the servers and the coordinator.
30
+ In [1] a CC method1 was proposed based on gradient coding (GC) [8], which approximates the inverse of a matrix A. In order
31
+ to overcome the obstacle of non-linearity, the columns of A−1 are approximated. When assuming floating-point arithmetic, this
32
+ CCM introduces no numerical nor approximation errors. Note that GC and not CMM was utilized, as the latter does not require
33
+ the encoding to be done locally by the workers.
34
+ Though the two areas of FL and CC seem to be closely related, on the surface they appear incompatible. For instance, in CC
35
+ one often assumes there is a master server that distributes the data and may perform the encoding (encoding by the master server
36
+ is done in CMM, but not in GC), while in FL the central coordinator never has access to the distributed local training data; which
37
+ are located at different client nodes or workers.
38
+ There are a few recent works that leverage CC in order to devise secure FL methods for distributed regression and iterative
39
+ optimization [9]–[14]. In this work, we combine optimization and CC, using erasure coding to protect against stragglers as in
40
+ CC and locally approximating the inverse without revealing the data to the coordinator, to design an FL scheme. Our approach
41
+ is based on the coded matrix inversion method (CMIM) we develop, which utilizes balanced Reed-Solomon (BRS) codes [15],
42
+ [16]. This results in an efficient decoding in terms of the threshold number of responsive workers needed to perform an error free
43
+ computation. We show that the general class of maximum distance separable (MDS) generator matrices could be used to generate
44
+ a suitable erasure code (Theorem 6). The focus is on BRS codes, which have the following advantages:
45
+ (i) minimum redundancy per job across the network,
46
+ (ii) they optimize communication from workers to the master,
47
+ (iii) we can efficiently decode the resulting method.
48
+ Our CMIM can also be used to compute the Moore–Penrose pseudoinverse Y† of a data matrix Y ∈ RM×N for M ≫ N,
49
+ which is more general than inverting a square matrix. By using the fact that Y† = (Y⊤Y)−1Y⊤, the bottleneck is computing
50
+ the inverse of A = Y⊤Y. In addition, two more matrix multiplications need to take place distributively: computing A before
51
+ the inversion; and Â−1Y⊤ after the inverse has been approximated. The matrix products can be computed distributively using
53
+ A preliminary version also considers approximating A† [1], in the CC setting. This work was partially supported by grants ARO W911NF-15-1-0479 and Dept
54
+ of Energy DE-NA0003921.
55
+ 1We abbreviate ‘coded computing method/methods’ to CCM/CCMs.
56
+ arXiv:2301.03539v1 [cs.IT] 9 Jan 2023
57
+
58
+ 2
59
+ various CCMs, e.g. we can use a modification of the coded FL approaches of [11] and a CMM from [17]; both of which are based
60
+ on GC. For the remainder of the paper, we focus on the generic problem of inverting a square matrix A.
61
+ The proposed FL approach applies to general linear regression problems. Compared to previous FL iterative approaches [18],
62
+ the difference is that for Yθ = p; with p the label vector and θ the model parameters, the pseudoinverse-regularized regression
63
+ solution is θ̂ = Ŷ†p. Unlike conventional FL methods, this regularized regression can be computed non-iteratively. The non-
65
+ iterative nature of the proposed approach is advantageous in settings such as Kalman filtering, where the matrix inverse must be
66
+ updated in real time as measurements come in.
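A minimal sketch (with random stand-in data) of the non-iterative solution referred to above: the regression parameters follow from the pseudoinverse, θ̂ = Y†p, and since Y† = (YᵀY)⁻¹Yᵀ, the expensive step is inverting the N×N matrix A = YᵀY.

import numpy as np

rng = np.random.default_rng(1)
M, N = 500, 20
Y = rng.normal(size=(M, N))                      # overdetermined data matrix, M >> N
theta_true = rng.normal(size=N)
p = Y @ theta_true + 0.01 * rng.normal(size=M)   # label vector

A = Y.T @ Y                                      # the matrix whose inverse is approximated
theta_via_inverse = np.linalg.inv(A) @ (Y.T @ p) # (Y^T Y)^{-1} Y^T p
theta_via_pinv    = np.linalg.pinv(Y) @ p        # Y^dagger p

print(np.allclose(theta_via_inverse, theta_via_pinv, atol=1e-8))
print("max error vs. truth:", np.abs(theta_via_inverse - theta_true).max())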
67
+ The paper is organized as follows. In II we recall basic facts on matrix inversion, least squares approximation and finite fields.
68
+ In III we review BRS codes, and prove two key lemmas regarding their generator matrices. In IV we present the matrix inverse
69
+ approximation algorithm we utilize in our CCM. The main contribution is presented in V. Our federated approach is split into four
70
+ phases, which we group in pairs of two. First, we discuss information sharing from the coordinator to the workers (we consider
71
+ all the clients’ servers as the network’s workers), and then information sharing between the workers. Second, we show how our
72
+ inversion algorithm can be incorporated in linear CCMs, and describe how this fits into the FL paradigm. Concluding remarks and
73
+ future work are presented in VI.
74
+ A. Overview of the Coded Matrix Inversion Method
75
+ In CC the computational network is centralized, and is comprised of a master server who communicates with n workers. The
76
+ idea behind our approximation is that the workers use a least squares solver to approximate multiple columns of A−1, resulting
77
+ in a set of local approximations to submatrices of Â−1, which we refer to as blocks. We present approximation guarantees and
79
+ simulation results for steepest descent (SD) and conjugate gradient (CG) iterative optimization methods. By locally approximating
80
+ the columns in this way, the workers can linearly encode the blocks of Â−1. The clients have a block of data {A_ι}_{ι=1}^k, which constitute the data matrix A = [A_1 · · · A_k]. To simplify our presentation, we assume that each local data block is of the same
88
+ size; i.e. A_ι ∈ R^{N×T} for T = N/k, and that client i has n_i servers. Therefore, the total number of servers is n = Σ_{j=1}^{k} n_j.
90
+ We assume the blocks are of the same size, so that the encodings carried out by the clients are consistent. In V, we show that this
91
+ assumption is not necessary. Moreover, for the CCM, it is not required that the number of blocks equal the number of clients. For
92
+ a given natural number γ, assume that γ divides T; denoted γ | T (each local data block Aι is further divided into γ sub-blocks).
93
+ In the case where k ∤ N or γ ∤ T, we can pad the blocks of Â−1 so that these assumptions are met.
95
+ A limitation of our proposed CMIM, is the fact that each server needs to have full knowledge of A, in order to estimate columns
96
+ of A−1 through a least squares solver. The sensitivity of Gaussian elimination and matrix inversion also requires that all clients
97
+ have knowledge of each others’ data [1]. This limitation is shared by other coded federated learning methods, e.g. CodedPaddedFL
98
+ [11]. In contrast to CC and GC; where a master server has access to all the data, in FL the data is inherently distributed across
99
+ devices, thus GC cannot be applied directly. We also assume that the coordinator does not intercept the communication between
100
the clients, otherwise she could recover the local data. Also, we trust that the coordinator will not invert Â−1 to approximate A
102
+ — this would be computationally difficult, for N large.
103
+ Before broadcasting the data amongst themselves, the clients encode their block Ai, which guarantees security from outside
104
+ eavesdroppers. When the clients receive the encoded data, they can decrypt and recover A. Then, their servers act as the workers
105
+ of the proposed CMIM and carry out their assigned computations, and directly communicate their computations back to the
106
+ coordinator. Once the recovery threshold (the minimum number of responses needed to recover the computation) is met, the
107
approximation Â−1 is recoverable.
109
+ B. Coded Federated Learning
110
+ There are few works that leverage CC to devise secure FL schemes. Most of these works have focused on distributed regression
111
+ and iterative methods, which is the primary application for FL [9]–[13]. Below, we describe and compare these approaches to our
112
+ work.
113
+ The authors of [9] proposed coded federated learning, in which they utilize a CMM scheme. Their security relies on the use
114
+ of random linear codes, to define the parity data. Computations are carried out locally on the systematic data, and only the parity
115
+ data is sent to the coordinator. The main drawback compared to our scheme is that each worker has to generate a random encoding
116
+ matrix and apply a matrix multiplication for the encodings, while we use the same BRS generator matrix across the network,
117
+ based on GC, to linearly encode the local computations. The drawback in our case, is that the workers need to securely share their
118
+ data with each other. This is an artifact of the operation (inversion) we are approximating, and is inevitable in the general case
119
+ where A has no structure. Under the FL setting we are considering, where the data is gathered or generated locally and is not
120
+ i.i.d., we cannot make any assumptions on the structure of A.
121
+ In [11], two methods were proposed. CodedPaddedFL combines one-time-padding with GC to carry out the FL task. Some
122
+ of its disadvantages are that a one-time-pad (OTP) needs to be generated by each worker, and that the OTPs are shared with the
123
+ coordinator, which means that if she gets hold of the encrypted data, she can decrypt it, compromising security. Furthermore,
124
+ there is a heavy communication load and the coordinator needs to store all the pads in order to recover the computed gradients. In
125
+
126
127
+ the proposed CMIM, the coordinator generates a set of interpolation points, and shares them with the clients. If the coordinator
128
+ can intercept the communication between the workers, she can decrypt the encrypted data blocks. The second method proposed
129
+ in [11], CodedSecAgg, relies on Shamir’s secret sharing (SSS); which is based on polynomial interpolation over finite fields. In
130
+ contrast, our CMIM relies on GC and Lagrange interpolation.
131
+ Lastly, we discuss the method proposed in [13], which is based on the McEliece cryptosystem, and moderate-density parity-
132
+ check codes. This scheme considers a communication delay model which defines stragglers as the workers who respond slower
133
+ than the fastest worker, and time out after a predetermined amount of time ∆. As the iterative SD process carries on, such workers
134
+ are continuously disregarded. Due to this, there is a data sharing step at each iteration, at which the new stragglers communicate
135
+ encrypted versions of their data to the active workers. Our scheme is non-iterative, and has a fixed recovery threshold. Unlike
136
+ some of the works previously mentioned, which guarantee information-theoretic security, the McEliece based systems and our
137
+ approach have computational privacy guarantees.
138
+ C. Lagrange Interpolation Coded Computing Methods
139
+ While there is extensive literature on matrix-vector and matrix-matrix multiplication, and computing the gradient in the presence
140
+ of stragglers, there is limited work on computing or approximating the inverse of a matrix [19]. The non-linearity of matrix
141
+ inversion prohibits linear or polynomial encoding of the data before the computations are to be performed. Consequently, most
142
+ CCMs cannot be directly utilized. GC is the appropriate CC set up to consider [20], precisely because the encoding takes place
143
+ once the computation has been completed, in contrast to most CMM methods where the encoding is done by the master, before
144
+ the data is distributed.
145
+ Here, we give a brief overview of the GC on which our CMIM is based. We also review “Lagrange Coded Computing” (LCC),
146
+ which has relations to our approach. Then, we give a summary of our proposed CMIM. All these rely on Lagrange interpolation
147
+ over finite fields.
148
+ Gradient codes are a class of codes designed to mitigate the effect of stragglers in data centers, by recovering the gradient of
149
+ differentiable and additively separable objective functions in distributed first order methods [20]. The proposed CMIM utilizes
150
+ BRS generator matrices constructed for GC [8]. The main difference from our work is that in GC the objective is to construct an
151
+ encoding matrix G and decoding vectors aI ∈ Ck, such that a⊤
152
+ I G = ⃗1 for any set of non-straggling workers indexed by I. To
153
+ do so, the decomposition of the BRS generator matrices GI = HIP is exploited, where HI is a Vandermonde matrix; and the
154
+ first row of P is equal to ⃗1. Subsequently a⊤
155
+ I is extracted as the first row of H−1
156
+ I .
157
+ In the proposed CMIM framework, the objective is to design an encoding-decoding pair ( ˜G, ˜DI) for which ˜DI ˜G = IN, for
158
+ all I ⊊ Nn of size k. The essential reason for requiring this condition, as opposed to that of GC, is that the empirical gradient of a
159
given dataset is the sum of the individual gradients, while in our scenario, if the columns of Â−1 are summed they cannot then be recovered.
162
+ The state-of-the art CC framework is LCC, which is used to compute arbitrary multivariate polynomials of a given dataset
163
+ [5], [21]. This approach is based on Lagrange interpolation, and it achieves the optimal trade-off between resiliency, security,
164
+ and privacy. The problem we are considering is not a multivariate polynomial in terms of A. To securely communicate A to
165
+ the workers, we encode it through Lagrange interpolation. Though similar ideas appear in LCC, the purpose and application of
166
+ the interpolation is different. Furthermore, LCC is a point-based approach [22] and requires additional interpolation and linear
167
+ combination steps after the decoding takes place.
168
Recall that the workers in the CMIM must compute blocks of Â−1. Once they complete their computations, they encode them
170
+ by computing a linear combination with coefficients determined by a sparsest-balanced MDS generator matrix. Referring to the
171
+ advantages claimed for CMIM in Section I, working with MDS generator matrices allows us to meet points (i) and (ii), while BRS
172
generator matrices also help us satisfy (iii). Once the recovery threshold is met, the coordinator can recover the approximation Â−1. The structure of sparsest-balanced generator matrices is also leveraged to optimally allocate tasks to the workers, while
175
+ linear encoding is what allows minimal communication load from the workers to the master. Security against eavesdroppers is
176
+ guaranteed by encoding the local data through a modified Lagrange interpolation polynomial, before it is shared by the clients.
177
+ This CMIM also extends to approximating A† [1].
178
+ II. PRELIMINARY BACKGROUND
179
+ The set of N ×N non-singular matrices is denoted by GLN(R). Recall that A ∈ GLN(R) has a unique inverse A−1, such that
180
AA−1 = A−1A = IN. The simplest way of computing A−1 is by performing Gaussian elimination on [A | IN], which gives [IN | A−1] in O(N³) operations. In Algorithm 1, we approximate A−1 column-by-column. We denote the ith row and column
189
+ matrix M, the matrix consisting only of the rows indexed by I, is denoted by MI.
190
+ In the proposed algorithm we approximate N instances of the least squares minimization problem
191
θ⋆_ls = arg min_{θ∈R^M} { ∥Aθ − y∥²₂ }    (1)
200
201
+ for A ∈ RN×M and y ∈ RN. In many applications N ≫ M, where the rows represent the feature vectors of a dataset. This has
202
the closed-form solution θ⋆_ls = A†y.
204
+ Computing A† to solve (1) is intractable for large M, as it requires computing the inverse of A⊤A. Instead, we use gradient
205
methods to get approximate solutions, e.g. SD or CG, which require fewer operations and can be carried out distributively. One could use
206
+ second-order methods; e.g. Newton–Raphson, Gauss-Newton, Quasi-Newton, BFGS, or Krylov subspace methods instead. This
207
+ would be worthwhile future work.
208
+ When considering a minimization problem with a convex differentiable objective function ψ: Θ → R over an open constrained
209
+ set Θ ⊆ RM, as in (1), the SD procedure selects an initial θ[0] ∈ Θ, and then updates θ according to:
210
+ θ[t+1] = θ[t] − ξt · ∇θψ(θ[t])
211
+ for t = 1, 2, 3, ...
212
+ until a termination criterion is met, for ξt the step-size. The CG method is the most used and prominent iterative procedure for
213
+ numerically solving systems of positive-definite equations.
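As an illustration of the SD update above, the following is a minimal numpy sketch for a single least squares instance of (1); the fixed step size 1/(2σmax(A)²) and the stopping rule are illustrative choices, not prescribed by our scheme.

import numpy as np

def steepest_descent_ls(A, y, eps=1e-6, max_iter=10_000):
    # Minimize ||A @ theta - y||_2^2 with a fixed step size 1/(2*sigma_max(A)^2),
    # stopping once the gradient norm drops below eps.
    xi = 0.5 / np.linalg.norm(A, 2) ** 2
    theta = np.zeros(A.shape[1])
    for _ in range(max_iter):
        grad = 2.0 * A.T @ (A @ theta - y)   # gradient of the squared residual
        if np.linalg.norm(grad) <= eps:
            break
        theta = theta - xi * grad
    return theta

rng = np.random.default_rng(0)
A, y = rng.standard_normal((50, 10)), rng.standard_normal(50)
theta_hat = steepest_descent_ls(A, y)
print(np.linalg.norm(A.T @ (A @ theta_hat - y)))   # small residual gradient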
214
+ Our proposed coding scheme is defined over the finite field of q elements, Fq. We denote its cyclic multiplicative subgroup
215
by F×_q = Fq \ {0_{Fq}}. For implementation purposes, we identify finite fields with their realization in C as a subgroup of the circle group, since we assume our data is over R. All operations can therefore be carried out over C. Specifically, for β ∈ F×_q a generator, we identify β^j with e^{2πij/q}, and 0_{Fq} with 1. The set of integers between 1 and ν is denoted by Nν.
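A minimal sketch of this realization is given below, mapping β^j to points on the complex unit circle; the values of q and n are arbitrary illustrative choices.

import numpy as np

def field_points(q, n):
    # Identify the generator beta of F_q^x with exp(2*pi*1j/q), so that
    # beta^1, ..., beta^n are n distinct points on the circle group.
    beta = np.exp(2j * np.pi / q)
    return beta ** np.arange(1, n + 1)

B = field_points(q=11, n=9)
print(np.allclose(np.abs(B), 1.0))   # all n points lie on the unit circle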
220
+ III. BALANCED REED-SOLOMON CODES
221
+ A Reed-Solomon code RSq[n, k] over Fq for q > n > k, is the encoding of polynomials of degree at most k − 1, for k the
222
message length and n the code length. It represents our message over the defining set of points A = {αi}_{i=1}^n ⊂ Fq:

RSq[n, k] = { (f(α1), f(α2), · · · , f(αn)) : f(X) ∈ Fq[X] of degree ⩽ k − 1 }

where αi = α^i, for α a primitive root of Fq. Hence, each αi is distinct. A natural interpretation of RSq[n, k] is through its encoding
231
map. Each message (m0, ..., mk−1) ∈ F_q^k is interpreted as f(x) = Σ_{i=0}^{k−1} mi x^i ∈ Fq[x], and f is evaluated at each point of A.
234
From this, RSq[n, k] can be defined through the generator matrix

$$ G = \begin{pmatrix} 1 & \alpha_1 & \alpha_1^2 & \cdots & \alpha_1^{k-1} \\ 1 & \alpha_2 & \alpha_2^2 & \cdots & \alpha_2^{k-1} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ 1 & \alpha_n & \alpha_n^2 & \cdots & \alpha_n^{k-1} \end{pmatrix} \in \mathbb{F}_q^{n \times k}, $$
274
+ thus, RS codes are linear codes over Fq. Furthermore, they attain the Singleton bound, i.e. d = n − k + 1, where d is the code’s
275
+ distance, which implies that they are MDS.
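As a quick numerical illustration of the MDS property, and assuming the complex realization of the field described in II, the sketch below builds the Vandermonde generator and checks that every set of k rows is linearly independent; all parameters are illustrative.

import numpy as np
from itertools import combinations

def rs_generator(points, k):
    # Vandermonde generator with G[i, j] = alpha_i ** j for j = 0, ..., k-1.
    return np.vander(points, N=k, increasing=True)

q, n, k = 11, 7, 3
alphas = np.exp(2j * np.pi * np.arange(1, n + 1) / q)   # distinct evaluation points
G = rs_generator(alphas, k)

# MDS property: every k x k submatrix formed by k rows of G has full rank.
ok = all(np.linalg.matrix_rank(G[list(I), :]) == k for I in combinations(range(n), k))
print(ok)   # expected: True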
276
Balanced Reed-Solomon codes [15], [16] are a family of linear MDS error-correcting codes with generator matrices G ∈ F_q^{n×k} that are:
279
+ • sparsest: each column has the least possible number of nonzero entries
280
+ • balanced: each row contains the same number of nonzero entries
281
for the given code parameters k and n. The design of these generators is suitable for our purposes, as:
282
+ 1) we have balanced loads across homogeneous workers,
283
+ 2) sparse generator matrices reduce the computation tasks across the network,
284
+ 3) the MDS property permits an efficient decoding step,
285
+ 4) linear codes produce a compressed representation of the encoded blocks.
286
+ A. Balanced Reed-Solomon Codes for CC
287
+ In the proposed CMIM, we leverage BRS generator matrices to approximate A−1. For simplicity, we will consider the case
288
where d = s + 1 = nw/k is a positive integer², for n the number of workers and s the number of stragglers. Furthermore, d is the distance of the code and ∥G(j)∥0 = d for all j ∈ Nk; ∥G(i)∥0 = w for all i ∈ Nn, and d > w since n > k. For decoding purposes, we require that at least k = n − s workers respond. Consequently, d = s + 1 implies that n − d = k − 1. For simplicity, we also assume d ⩾ n/2. In our setting, each column of G corresponds to a computation task of Â−1, which we will denote by Âi, and each row corresponds to a worker.
²The case where nw/k ∈ Q+\Z+ is analysed in [8], and also applies to our approach. We restrict our discussion to the case where nw/k ∈ Z+.
301
+
302
303
Our choice of such a generator matrix G ∈ F_q^{n×k} solves

arg min_{G ∈ F_q^{n×k}} nnzr(G)   s.t.   ∥G(i)∥0 = w ∀i ∈ Nn,   ∥G(j)∥0 = d ∀j ∈ Nk,   rank(GI) = k ∀I : |I| = k    (2)
318
+ which determines an optimal task allocation among the workers of the proposed CMIM.
319
+ Under the above assumptions, the entries of the generator matrix of a BRSq[n, k] code meet the following:
320
+ • each column is sparsest, with exactly d nonzero entries
321
• each row is balanced, with w = dk/n nonzero entries
323
+ where d equals to the number of workers who are tasked to compute each block, and w is the number of blocks that are computed
324
+ by each worker.
325
+ Each column G(j) corresponds to a polynomial pj(x), whose entries are the evaluation of the polynomial we define in (3) at
326
each of the points of the defining set A, i.e. Gij = pj(αi) for (i, j) ∈ Nn × Nk. To construct the polynomials {pj(x)}_{j=1}^k, for which deg(pj) ⩽ n − d = k − 1, we first need to determine a sparsest and balanced mask matrix M ∈ {0, 1}^{n×k}, which is ρ-sparse for ρ = d/n; i.e. nnzr(G) = ρnk. We use the construction from [8], though it is fairly easy to construct more
331
+ general such matrices, by using the Gale-Ryser Theorem [23], [24]. Furthermore, deterministic constructions resemble generator
332
+ matrices of cyclic codes.
333
+ For our purposes we use B as our defining set of points, where each point corresponds to the worker with the same index. The
334
+ objective now is to devise the polynomials pj(x), for which pj(βi) = 0 if and only if Mij = 0. Therefore:
335
(I) Mij = 0  ⟹  (x − βi) | pj(x)
(II) Mij ≠ 0  ⟹  pj(βi) ∈ F×_q
for all pairs (i, j).
343
+ The construction of BRS[n, k]q from [15] is based on what the authors called scaled polynomials. Below, we summarize the
344
+ polynomial construction based on Lagrange interpolation [8]. We then prove a simple but important result that allows us to
345
+ efficiently perform the decoding step.
346
+ The univariate polynomials corresponding to each column G(j), are defined as:
347
pj(x) := Π_{i : Mij = 0} (x − βi)/(βj − βi) = Σ_{ι=1}^{k} pj,ι · x^{ι−1} ∈ Fq[x]    (3)
359
+ which satisfy (I) and (II). By the BCH bound [25, Chapter 9], it follows that deg(pj) ⩾ n − d = k − 1 for all j ∈ Nk. Since
360
+ each pj(x) is the product of n − d monomials, we conclude that the bound on the degree is satisfied and met with equality, hence
361
pj,ι ∈ F×_q for all coefficients.
363
By construction, both G and GI are decomposable into a Vandermonde matrix H ∈ B^{n×k} and a matrix comprised of the polynomial coefficients P ∈ (F×_q)^{k×k} [8]. Specifically, G = HP where Hij = β_i^{j−1} = β^{i(j−1)} and Pij = pj,i are the coefficients from (3). This can be interpreted as P(j) defining the polynomial pj(x), and H(i) being comprised of the first k powers of βi (from β_i^0 to β_i^{k−1}) in ascending order, therefore

pj(βi) = Σ_{ι=1}^{k} pj,ι · β_i^{ι−1} = ⟨H(i), P(j)⟩.
377
+ The following lemmas will help us respectively establish in our CC setting the efficiency of our decoding step and the optimality
378
+ of the allocated tasks to the workers. For Lemma 1, recall that efficient matrix multiplication algorithms have complexity O(N ω),
379
+ for ω < 2.373 the matrix multiplication exponent [26].
380
Lemma 1. The restriction GI ∈ F_q^{k×k} of G to any of its k rows indexed by I ⊊ Nn, is an invertible matrix. Moreover, its inverse
383
+ can be computed online in O(k2 + kω) operations.
384
+ Proof. The matrices H and P are of size n × k and k × k respectively. The restricted matrix GI is then equal to HIP, where
385
HI ∈ F_q^{k×k} is a square Vandermonde matrix, which is invertible in O(k²) time [27]. Specifically
388
$$ H_I = \begin{pmatrix} 1 & \beta_{I_1} & \beta_{I_1}^2 & \cdots & \beta_{I_1}^{k-1} \\ 1 & \beta_{I_2} & \beta_{I_2}^2 & \cdots & \beta_{I_2}^{k-1} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ 1 & \beta_{I_k} & \beta_{I_k}^2 & \cdots & \beta_{I_k}^{k-1} \end{pmatrix} \in \mathbb{F}_q^{k \times k} . $$
427
+
428
429
+ It follows that
430
det(HI) = Π_{{i<j}⊆I} (βj − βi)
434
+ which is nonzero, since β is primitive. Therefore, HI is invertible. By [15, Lemma 1] and the BCH bound, we conclude that P is
435
+ also invertible. Hence, GI is invertible for any set I.
436
Note that the inverse of P can be computed a priori by the master before we deploy our CCM. Therefore, computing G_I^{−1} online with knowledge of P^{−1} requires an inversion of HI, which takes O(k²) operations, and then multiplying it by P^{−1}. Thus, it requires O(k² + k^ω) operations in total.
441
+
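To make the construction and the two-step inversion of Lemma 1 concrete, here is a minimal numpy sketch. The parameters n = 6, k = 3, d = 4 are illustrative (they satisfy d = n − k + 1, d ⩾ n/2 and w = dk/n = 2), the mask follows Algorithm 2 of the appendix, and the polynomial normalization mirrors (3); this is not the implementation used in our experiments.

import numpy as np

def mask_matrix(n, k, d):
    # Algorithm 2: column j gets ones in d cyclically consecutive rows.
    M = np.zeros((n, k), dtype=int)
    for j in range(k):
        for i in range(d):
            M[(i + j * d) % n, j] = 1
    return M

def brs_generator(pts, M):
    # Polynomials of (3): p_j vanishes exactly where M[:, j] == 0, with p_j(beta_j) = 1.
    n, k = M.shape
    P = np.zeros((k, k), dtype=complex)                  # ascending coefficients p_{j, .}
    for j in range(k):
        roots = pts[M[:, j] == 0]                        # the n - d prescribed roots
        c = np.poly(roots)                               # monic, highest degree first
        c = c / np.polyval(c, pts[j])                    # normalization as in (3)
        P[: c.size, j] = c[::-1]
    H = np.vander(pts, N=k, increasing=True)             # H[i, l] = beta_i ** l
    return H @ P, H, P

n, k, d = 6, 3, 4
q = 7
pts = np.exp(2j * np.pi * np.arange(1, n + 1) / q)       # beta^1, ..., beta^n on the circle
G, H, P = brs_generator(pts, mask_matrix(n, k, d))
print(np.allclose(G, H @ P))                             # decomposition G = H P

# Lemma 1: for any k responsive workers I, invert G_I online as P^{-1} H_I^{-1}.
I = [0, 2, 5]
G_I_inv = np.linalg.inv(P) @ np.linalg.inv(H[I, :])
print(np.allclose(G_I_inv @ G[I, :], np.eye(k)))         # expected: True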
442
Lemma 2. The generator matrix G ∈ F_q^{n×k} of a BRSq[n, k] MDS code defined by the polynomials pj(x) of (3), solves the optimization problem (2).
445
+ optimization problem (2).
446
+ Proof. The first two constraints are satisfied by the definition of G, which meets the sparsest and balanced constraints with
447
+ equality; for the given parameters. The last constraint is implied by the MDS theorem, which states that every set of k rows of G
448
+ is linearly independent.
449
+ The sparsity constraints of (2) imply that nnzr(G) ⩾ max{nw, kd}, and for our parameters we have nw = kd. This condition
450
+ is met with equality for the chosen G, as
451
nnzr(G) = Σ_{j∈Nk} nnzr(G(j)) = Σ_{j∈Nk} #{ pj(βi) ≠ 0 : βi ∈ B } = Σ_{j∈Nk} ( n − #{ i : Mij = 0 } ) = Σ_{j∈Nk} ( n − (n − d) ) = kd
474
+ and the proof is complete.
475
+
476
+ IV. INVERSE APPROXIMATION ALGORITHM
477
Our goal is to estimate A−1 = [b1 · · · bN], for A a square matrix of order N. A key property to note is

AA−1 = A [b1 · · · bN] = [Ab1 · · · AbN] = IN
491
+ which implies that Abi = ei for all i ∈ NN, where ei are the standard basis column vectors. Assume for now that we use any
492
+ black-box least squares solver to estimate
493
b̂i ≈ arg min_{b∈R^N} { fi(b) := ∥Ab − ei∥²₂ }    (4)

which we call N times, to recover Â−1 := [b̂1 · · · b̂N]. This approach may be viewed as approximating

Â−1 ≈ arg min_{B∈R^{N×N}} { ∥AB − IN∥²_F } .
514
+ Alternatively, one could estimate the rows of A−1. Algorithm 1 shows how this can be performed by a single server.
515
+ Algorithm 1: Estimating A−1
516
+ Input: A ∈ GLN(R)
517
for i = 1 to N do
    approximate b̂i ≈ arg min_{b∈R^N} { ∥Ab − ei∥²₂ }
end
return Â−1 ← [b̂1 · · · b̂N]
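A minimal sketch of Algorithm 1 is given below, using CG on the normal equations as the black-box least squares solver; the fixed iteration budget and problem sizes are purely illustrative.

import numpy as np

def cg_normal_equations(A, e, iters):
    # Approximately solve min_b ||A b - e||_2^2 by running CG on A^T A b = A^T e.
    AtA, rhs = A.T @ A, A.T @ e
    b = np.zeros_like(rhs)
    r = rhs - AtA @ b
    p = r.copy()
    for _ in range(iters):
        Ap = AtA @ p
        alpha = (r @ r) / (p @ Ap)
        b = b + alpha * p
        r_new = r - alpha * Ap
        if np.linalg.norm(r_new) < 1e-12:    # residual small enough; stop early
            return b
        p = r_new + (r_new @ r_new) / (r @ r) * p
        r = r_new
    return b

def approximate_inverse(A, iters=50):
    # Algorithm 1: estimate each column b_i of A^{-1} from A b_i = e_i.
    N = A.shape[0]
    cols = []
    for i in range(N):
        e = np.zeros(N); e[i] = 1.0
        cols.append(cg_normal_equations(A, e, iters))
    return np.column_stack(cols)

rng = np.random.default_rng(1)
A = 50 * rng.standard_normal((100, 100))
A_inv_hat = approximate_inverse(A)
A_inv = np.linalg.inv(A)
print(np.linalg.norm(A_inv_hat - A_inv, "fro") / np.linalg.norm(A_inv, "fro"))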
528
In the case where SD is used to approximate b̂i from (4), the overall operation count is O(Ti N²), for Ti the total number of descent iterations used. An upper bound on the number of iterations can be determined by the underlying termination criterion, e.g. the criterion fi(b̂[t]) − fi(b⋆_ls) ⩽ ϵ is guaranteed to be satisfied after T = O(log(1/ϵ)) iterations [28]. The overall error of Â−1 may be quantified as
• errℓ2(Â−1) := ∥Â−1 − A−1∥2
• errF(Â−1) := ∥Â−1 − A−1∥F
• errrF(Â−1) := ∥Â−1 − A−1∥F / ∥A−1∥F = Σ_{i=1}^{N} ∥Ab̂i − ei∥2 / ∥A−1∥F .
553
To approximate A−1 distributively, each of the n servers is asked to estimate τ-many b̂i's in parallel. When using SD, the
554
+ worst-case runtime by the workers is O(τTmaxN 2), for Tmax the maximum number of iterations of SD among the workers. If CG
555
+ is used, each worker needs no more than a total of Nτ CG steps to exactly compute its task, i.e. O(τNκ2) operations; as each
556
+ instance of (4) is expected to converge in O(κ2) iterations, which is the worst case runtime [29], [30].
557
In order to bound errrF(Â−1) = ∥Â−1 − A−1∥F / ∥A−1∥F, we first upper bound the numerator and then lower bound the denominator. Since ∥A−1 − Â−1∥²_F = Σ_{i=1}^{N} ∥A−1ei − b̂i∥²₂, bounding the numerator reduces to bounding ∥A−1ei − b̂i∥²₂ for all i ∈ NN.
568
+ This is straightforward
569
∥A−1ei − b̂i∥²₂ ⩽♦ 2( ∥A−1ei∥²₂ + ∥b̂i∥²₂ ) ⩽$ 2( ∥A−1∥²₂ · ∥ei∥²₂ + ∥b̂i∥²₂ ) = 2( 1/σmin(A)² + ∥b̂i∥²₂ )    (5)
592
where in ♦ we use the fact that ∥u − v∥²₂ ⩽ 2(∥u∥²₂ + ∥v∥²₂), and in $ the submultiplicativity of the ℓ2-norm is invoked. For the denominator, by the definition of the Frobenius norm

∥A−1∥²_F = Σ_{i=1}^{N} 1/σi(A)² ⩾ N/σmax(A)² .    (6)
607
+ By combining (5) and (6) we get
608
errrF(Â−1) ⩽ ( 2 · ( N/σmin(A)² + Σ_{i=1}^{N} ∥b̂i∥²₂ ) / ( N/σmax(A)² ) )^{1/2} = ( 2( κ₂² + (σmax(A)²/N) · Σ_{i=1}^{N} ∥b̂i∥²₂ ) )^{1/2} .
633
+ This is an additive error bound in terms of the problem’s condition number, which also shows a dependency on the estimates
634
{b̂i}_{i=1}^N. Propositions 3 and 4 give error bounds when using SD and CG as the subroutine of Algorithm 1, respectively.
636
Proposition 3. For A ∈ GLN(R), we have errF(Â−1) ⩽ ϵ√(N/2)/σmin(A)² and errrF(Â−1) ⩽ ϵ√(N/2)/σmin(A), when using SD to solve (4) with termination criteria ∥∇fi(b[t])∥2 ⩽ ϵ for each i.
646
+ Proof. Recall that for a strongly-convex function with strong-convexity parameter µ, we have the following optimization gap [28,
647
+ Section 9.1.2]
648
fi(b) − fi(b⋆_ls) ⩽ (1/(2µ)) · ∥∇fi(b)∥²₂ .    (7)

For A ∈ GLN(R) in (4), the constant is µ = σmin(A)². By fixing ϵ = √(2σmin(A)²η), we have η = ½ · (ϵ/σmin(A))². Thus, by (7)
662
+ and our termination criterion:
663
∥∇fi(b)∥2 ⩽ √(2σmin(A)²η)  ⟹  fi(b) − fi(b⋆_ls) ⩽ η ,

so when solving (4) we get

fi(b) − fi(b⋆_ls) = fi(b) − 0 = ∥Ab̂i − ei∥²₂ ,

hence

∥Ab̂i − ei∥²₂ ⩽ ½ · (ϵ/σmin(A))²    (8)
682
+
683
for all i ∈ NN. We want an upper bound for each summand ∥A−1ei − b̂i∥²₂ of the numerator of errrF(Â−1)²:

∥A−1ei − b̂i∥²₂ = ∥A−1(ei − Ab̂i)∥²₂ ⩽ ∥A−1∥²₂ · ∥ei − Ab̂i∥²₂ ⩽♯ ∥A−1∥²₂ · ½ · (ϵ/σmin(A))²    (9)
= ϵ²/(2σmin(A)⁴)    (10)
706
where ♯ follows from (8), thus errF(Â−1)² ⩽ Nϵ²/(2σmin(A)⁴). Substituting (9) into the definition of errrF(Â−1) gives us

errrF(Â−1)² ⩽ ( ∥A−1∥²₂ / ∥A−1∥²_F ) · (N/2) · (ϵ/σmin(A))² ⩽‡ (Nϵ²/2) / σmin(A)²

where ‡ follows from the fact that ∥A−1∥²₂ ⩽ ∥A−1∥²_F .
728
+
729
+ In the experiments of Subsection IV-A, we verify that Proposition 3 holds for Gaussian random matrices. The dependence on
730
1/σmin(A) is an artifact of using gradient methods to solve the underlying problems (4), since the error will be multiplied by ∥A−1∥²₂. In theory, this can be annihilated if one runs the algorithm on pA for p ≈ 1/σmin(A), followed by multiplication of the final result by p. This is a way of preconditioning SD. In practice, the scalar p should not be selected to be much larger than 1/σmin(A), as it could result in Â−1 ≈ 0_{N×N}.
736
Proposition 4. Assume Algorithm 1 uses CG to solve (4). Then, in O(N√κ2 ln(1/ϵ)) iterations, we have errF(Â−1) ⩽ Nϵ. Moreover, if A⊤A has Ñ distinct eigenvalues, it converges in at most ÑN steps.
743
Proof. By [29, Section 10] and [31, Section 2], we know that for each subroutine (4) of Algorithm 1, CG requires at most O(√κ2 ln(1/ϵ)) iterations in order to attain an ϵ-optimal point, for each b̂i. Hence, considering all approximate columns {b̂i}_{i=1}^N, we conclude that the total error in terms of the Frobenius norm of Â−1 is at most Nϵ.
Recall that in order to solve (1) with the CG method in the case where A is neither symmetric, positive-definite, nor square, we apply the CG iteration to the normal equations

A⊤Aθ = A⊤y .
751
+ This follows by setting the derivative of (1) to zero. In our scenario, we are assuming that A ∈ GLN(R), hence A⊤A is full-rank
752
+ and symmetric, thus CG in its simplest form can be used to solve the minimization problems of Algorithm 1. By [30, Theorem
753
+ 38.4], it follows that each instance of (4) converges in at most ˜N steps.
754
+
755
Even though Proposition 4 guarantees convergence in at most ÑN steps, it assumes exact rather than floating-point arithmetic; therefore the guarantee need not hold in practical settings. Our experiments, though, show that after significantly fewer steps we achieve approximations of negligible error, which is sufficient for ML and FL applications.
758
+ A. Numerical Experiments
759
+ The accuracy of the proposed algorithms was tested on randomly generated matrices, using both SD and CG for the subroutine
760
+ optimization problems. The depicted results are averages of 20 runs, with termination criteria ∥∇fi(b[t])∥2 ⩽ ϵ for SD and
761
∥b[t]_i − b[t−1]_i∥2 ⩽ ϵ for CG, for the given ϵ accuracy parameters. We considered A ∈ R^{100×100}. The error subscripts represent
765
A ∈ {ℓ2, F, rF} and N ∈ {ℓ2, F}. We note that significantly fewer iterations took place when CG was used for the same ϵ, though
766
+ this depends heavily on the choice of the step-size. The errors observed in the case of CG, are due to floating-point arithmetic.
767
+ Therefore, there is a trade-off between accuracy and speed when using SD vs. CG.
768
Average Â−1 errors, for A ∼ 50 · N(0, 1) — SD
ϵ     | 10−1     | 10−2     | 10−3     | 10−4      | 10−5
errA  | O(10−2)  | O(10−5)  | O(10−7)  | O(10−9)   | O(10−12)

Average Â−1 errors, for A ∼ 50 · N(0, 1) — CG
ϵ     | 10−3     | 10−4     | 10−5     | 10−6      | 10−7
errN  | O(10−3)  | O(10−5)  | O(10−8)  | O(10−11)  | O(10−12)
errrF | O(10−3)  | O(10−5)  | O(10−7)  | O(10−10)  | O(10−12)
800
We utilized Algorithm 1 in Newton's method, for classifying images of fours and nines from MNIST, by solving a regularized logistic regression minimization problem. For Algorithm 1, we used CG with a fixed number of iterations per column estimation. It is clear from Figure 1 that we require no more than 18 iterations per column estimate, for N = 785, to attain the optimal classification rate. With more than 18 CG iterations, the same classification rate was obtained.
804
+
805
[Figure 1 appears here: classification error (%) versus the number of CG iterations per column (14–19), comparing inversion with CG against exact inversion.]
824
+ Fig. 1. MNIST classification error, where Algorithm 1 is used in Newton’s method. In red, we depict the error when exact inversion was used.
825
+ V. FEDERATED CODED MATRIX INVERSION
826
+ In this section, we describe the proposed CMIM (also presented in [1]) which makes Algorithm 1 resilient to stragglers, and
827
+ show how it can be applied to the FL scenario described in the introduction. The CMIM workflow is depicted in Figure 2.
828
+ Our FL-scheme can be broken up in to four phases: (a) the coordinator shares elements β, H of a finite field with all the clients,
829
+ (b) the clients each generate a pseudorandom permutation (PRP) σι, encrypt their corresponding data block Aι through a matrix
830
polynomial fι(x), and broadcast {fι(x), σ_ι^{−1}} to the other clients, (c) the clients recover A, compute and encode their assigned
833
+ task Wι, which is communicated to the coordinator, (d) the coordinator decodes once sufficiently many servers respond. It is also
834
+ possible that β, H are determined collectively by the clients, or by a single client, which makes the data sharing secure against a
835
+ curious and dishonest coordinator.
836
Fig. 2. Algorithmic workflow of the CMIM, as proposed in [1]. The master shares f(x), an encoding analogous to (12), along with β, {η_j^{−1}}_{j=1}^k. The workers then recover A, compute their assigned tasks, and encode them according to G. Once k encodings Wι are sent back, Â−1 can be recovered.
842
+ In our proposed FL approach, we assume there is a trustworthy coordinator who shares certain parameters to each of the k
843
+ clients which constitute the network; e.g. hospitals in a health care network, each of which are comprised of multiple servers.
844
What we present works for the case where the clients have local datasets of different sizes, {Ni}_{i=1}^k. This would result in the
846
+ encoding functions fι(x) having different degrees, or their matrix coefficients being of a different size. In our setting we assume
847
+ the servers are homogeneous, i.e. they have the same computational power. Therefore, equal computational loads are assigned to
848
+ each of them. In order to keep the notation and size of the communication loads consistent, we assume w.l.o.g. that Aι ∈ RN×T
849
+ for all ι ∈ Nk. If this is not the case, before fι(x) are determined, the clients could perform a data exchange phase (e.g. [13]), so
850
+ that Ni = Nj for all i ̸= j. By this, it follows that the number of blocks does not have to be equal to the number of clients. The
851
+ example we describe, is simply a motivation. A flowchart of our approach is presented in Figure 3.
852
Moreover, in the case where M > N, for M = Σ_{i=1}^k Ni, we can select a subset of features and/or samples, so that the resulting
854
+ data matrix we consider is square. This can be interpreted as using the surrogate ˜A = SA, where S ∈ RN×M is an appropriate
855
+ (sparse) sketching matrix for matrix inversion [32], which the workers agree on.
856
+ First, in V-A we argue why all of A needs to be known by each of the workers, in order to recover entries or columns of its
857
+ inverse. Then, in V-B we focus on phases (a) and (b), where we utilize Lagrange interpolation to securely share A among the
858
workers. We discuss the computation tasks the workers are requested to compute, which are blocks of Â−1, and collectively
860
+ correspond to the subroutine problems of Algorithm 1. In V-C we focus on (c) and (d), where we show how the servers encode
861
+ their computations, and describe the coordinator’s decoding step. Optimality of BRS generator matrices in terms of the encoded
862
+ communication loads is established in V-D.
863
+ When assuming floating-point arithmetic, our approach introduces no numerical nor approximation errors. The errors are a
864
+ consequence of using iterative solvers to estimate (4), which we utilize to linearly separate the computations. Therefore, if the
865
+ workers can recover the optimal solutions to the underlying minimization problems, our scheme would be exact.
866
+
867
875
+ Fig. 3. Flowchart of our proposal, where k = ni = 4 for all i ∈ N4.
876
+ A. Knowledge of A is necessary
877
+ A bottleneck when computing the inverse of a matrix; or estimating its columns, is that the entire matrix needs to be known.
878
A single change in the matrix's entries may result in a singular matrix, which conveys how sensitive Gaussian elimination is.
879
+ Such problems are extensively studied in conditioning and stability of numerical analysis [30], and in perturbation theory. This is
880
+ not a focus of our work.
881
+ In the case where only one column is not known, one can determine the subspace in which the missing column lies, but without
882
+ the knowledge of at least one entry of that column, it would be impossible to recover that column. Even with such an approach or
883
+ a matrix completion algorithm, the entire A is determined before we proceed to inverting A; or performing linear regression to
884
+ approximate Ab = ei as in (4).
885
Another example, relating to our FL set up, is the case where one of the blocks is different. This could lead to drastic miscalculations. In the following example, we consider n = k = 2 and N = 4, where the second server sends a different block (the second block, i.e. the last two columns, differs between A1 and A2):

$$ A_1 = \begin{pmatrix} 6 & 2 & 2 & -5 \\ 0 & -1 & 2 & 0 \\ -5 & 6 & -1 & -3 \\ 5 & -3 & -4 & 3 \end{pmatrix} \qquad A_2 = \begin{pmatrix} 6 & 2 & -1 & -3 \\ 0 & -1 & 5 & 6 \\ -5 & 6 & 3 & -2 \\ 5 & -3 & 1 & 6 \end{pmatrix} . $$
934
It follows that ∥A1^{−1}∥F ≈ 90.45, ∥A2^{−1}∥F ≈ 1, and ∥A1^{−1} − A2^{−1}∥0 = 16; i.e. no entries of A1^{−1} and A2^{−1} are equal.
944
Furthermore, by the data processing inequality [33, Corollary pg. 35], the above implies that no fewer than N² information symbols must be known by each server if it is to approximate a column of A−1. Hence, all clients need full knowledge of each other's
946
+ information, and cannot communicate less than NT symbols to each other. This is a consequence of the fact that a dense vector is
947
+ not recoverable from underdetermined linear measurements. They can however send an encoded version of their respective block
948
+ Aι ∈ RN×T to the other clients consisting of NT symbols, determined by a modified Lagrange polynomial, which guarantees
949
+ security against eavesdroppers.
950
+ Similar cryptographic protocols date back to the SSS algorithm [34], which is also based on RS codes. This idea has extensively
951
+ been exploited in LCC [5], yet differs from our approach.
952
+ B. Phases (a), (b) — Data Encryption and Sharing
953
Let k, γ ∈ Z+ be factors of N and T respectively, so that T = N/k and Γ = T/γ.³ The coordinator constructs a set of distinct interpolation points B = {βj}_{j=1}^n ⊊ F×_q, for q > n ⩾ γ.⁴ To construct this set, it suffices to sample β ∈ F×_q; any one of the φ(q − 1) primitive roots of Fq (φ is Euler's totient function), which is a generator of the multiplicative group (F×_q, ·), and define
³If γ ∤ T, append 0_{T×1} to the end of the first γ̃ = T (mod γ) blocks which are each comprised of Γ̃ = ⌊T/γ⌋ columns of Aι, while the remaining γ − γ̃ blocks are comprised of Γ̃ + 1 columns. Now, each block is of size T × (Γ̃ + 1).
⁴For the encodings of the Aι's, γ points suffice, and we only need to require q > γ. We select B of cardinality n and require q > n ⩾ γ, in order to reuse B in our CCM.
967
+
968
983
+ Once the threshold is met,11
984
+ each point as βj = βj. Then, a random multiset H = {ηj}γ
985
+ j=1 ∈ 2F×
986
+ q of size γ is generated, i.e. repetitions in H are allowed,
987
+ which will be used to remove the structure of the Lagrange coefficients, as the adversaries could exploit their structure to reveal
988
+ β.
989
+ The element β and set H, are broadcasted securely to all the workers through a public-key cryptosystem, e.g. RSA or McEliece.
990
+ Matrices Aι are partitioned into γ blocks
991
Aι = [ A_ι^1 · · · A_ι^γ ]  where A_ι^i ∈ R^{N×Γ}, ∀i ∈ Nγ,    (11)
1000
and each client generates a PRP σι ∈ Sγ. The blocks {Aι}_{ι=1}^k are encrypted locally through the univariate polynomials

fι(x) = Σ_{j=1}^{γ} A_ι^j · η_{σι(j)} ( Π_{l≠j} (x − βl)/(βj − βl) )    (12)

for which fι(βj) = η_{σι(j)} A_ι^j.
1018
The clients then broadcast {fι(x), σ_ι^{−1}} to each other, and their servers can then recover all Aι's as follows:

Aι = [ η_{σ_ι^{−1}(1)} fι(β1) · · · η_{σ_ι^{−1}(γ)} fι(βγ) ] ∈ R^{N×T} .    (13)
1031
+ The coefficients of fι(x) are comprised of NΓ symbols, thus, each polynomial consists of a total of NT symbols, which is the
1032
+ minimum number of symbols needed to be communicated. The PRP σι is generated locally by the clients, to ensure that each
1033
+ fι(x) differs by more than just the matrix partitions.
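A minimal sketch of the encryption (12) and the recovery (13) for a single client's block is given below; the recovery simply divides out the η mask, and the permutation indexing may differ superficially from the convention of (13). All names, sizes and parameters are illustrative.

import numpy as np

def lagrange_basis(pts):
    # Row j: ascending coefficients of the Lagrange basis polynomial ell_j for pts.
    g = len(pts)
    L = np.zeros((g, g), dtype=complex)
    for j in range(g):
        c = np.poly(np.delete(pts, j))
        c = c / np.polyval(c, pts[j])
        L[j, :] = c[::-1]
    return L

def encrypt_block(A_blk, pts, eta, sigma):
    # Sketch of (12): f(x) = sum_j eta[sigma[j]] * A^j * ell_j(x), with gamma sub-blocks.
    gamma = len(pts)
    subs = np.hsplit(A_blk, gamma)
    L = lagrange_basis(pts)
    return [sum(eta[sigma[j]] * L[j, m] * subs[j] for j in range(gamma)) for m in range(gamma)]

def recover_block(coeffs, pts, eta, sigma):
    # Sketch of (13): evaluate f at beta_j and divide out the mask eta[sigma[j]].
    gamma = len(pts)
    evals = [sum(c * (pts[j] ** m) for m, c in enumerate(coeffs)) for j in range(gamma)]
    return np.real(np.hstack([evals[j] / eta[sigma[j]] for j in range(gamma)]))

rng = np.random.default_rng(2)
N, T, gamma, q = 8, 4, 4, 11
A_blk = rng.standard_normal((N, T))
pts = np.exp(2j * np.pi * np.arange(1, gamma + 1) / q)
eta = np.exp(2j * np.pi * rng.integers(1, q, size=gamma) / q)   # random nonzero field elements
sigma = rng.permutation(gamma)
coeffs = encrypt_block(A_blk, pts, eta, sigma)
print(np.allclose(recover_block(coeffs, pts, eta, sigma), A_blk))   # expected: True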
1034
+ We assume Kerckhoffs’ principle, which states that everyone has knowledge of the system, including the messages fι(x). For
1035
the proposed CMIM, as long as {β, H} and σ_ι^{−1} are securely communicated, even if fι(x) is revealed, the block Aι is secure
1038
+ against polynomial-bounded adversaries (this is the security level assumed by the cryptosystems used for the communication).
1039
+ Proposition 5. The encryptions of Aι through fι(x), are as secure against eavesdroppers as the public-key cryptosystems which
1040
are used when broadcasting {β, H} and σ_ι^{−1}. To recover Aι, an adversary needs to intercept both communications, and break
1043
+ both cryptosystems.
1044
+ Proof. We prove this by contradiction. Assume that an adversary was able to reverse the encoding fι(x) of Aι. This implies that
1045
he was able to reveal β and σι(H) := {η_{σι(j)}}_{j=1}^γ. The only way to reveal these elements is if he was able to both intercept and decipher the public-key cryptosystem used by the coordinator, which contradicts the security of the cryptosystem.
In order to invert the multiplications of σι(H) for each of the evaluations of fι(x), both H and σ_ι^{−1} need to be known. To do so,
1051
+ the adversary needs to intercept both the communication between the coordinator and the clients, and the communication between
1052
+ the clients, as well as breaking both the cryptosystems used to securely carry out these communications.
1053
+
1054
+ C. Phases (c), (d) — Computations, Encoding and Decoding
1055
+ At this stage, the workers have knowledge of everything they need in order to recover A, before they carry out their computation
1056
+ tasks. By (13), the recovery is straightforward.
1057
For Algorithm 1, any CCM in which the workers compute an encoding of partitions of the resulting computation E = [E1 · · · Ek] could be utilized. It is crucial that the encoding takes place on the computed tasks {Ei}_{i=1}^k in the scheme, and
1063
not the assigned data or partitions of the matrices that are being computed over (such CMMs leverage the linearity of matrix
1064
+ multiplication), otherwise the algorithm could potentially not return the correct approximation. This also means that utilizing such
1065
+ encryption approaches (e.g. [5]) for guaranteeing security against the workers, is not an option. We face these restrictions due to
1066
+ the fact that matrix inversion is a non-linear operator.
1067
The computation tasks Ei correspond to a partitioning Â−1 = [Â1 · · · Âk] of our approximation from Algorithm 1. We
1075
propose a linear encoding of the computed blocks {Âi}_{i=1}^k based on generators satisfying (2). Along with the proposed decoding
1078
+ step, we have a MDS-based CCM for matrix inversion.
1079
We consider the same parameters as in V-B, in order to reuse B in the proposed CMIM. Each Âi is comprised of T distinct but
1081
+ consecutive approximations of (4), i.e.
1082
Âi = [ b̂_{(i−1)T+1} · · · b̂_{iT} ] ∈ R^{N×T}   ∀i ∈ Nk,
1088
+ which could also be approximated by iteratively solving
1089
Âi ≈ arg min_{B∈R^{N×T}} { ∥AB − [ e_{(i−1)T+1} · · · e_{iT} ]∥²_F } .
1099
+ Without loss of generality, we assume that the workers use the same algorithms and parameters for estimating the columns
1100
{b̂i}_{i=1}^N. Therefore, workers allocated the same tasks are expected to get equal approximations in the same amount of time.
1102
+
1103
1104
+ For our CCM, we leverage BRS generator matrices for both the encoding and decoding steps. We adapt the GC framework,
1105
so we need an analogous condition to a⊤_I G = 1⃗ for the CMIM, in order to invoke Algorithm 1. The condition we require is ˜DI ˜G = IN, for an encoding-decoding pair ( ˜G, ˜DI).
1108
+ From our discussion on BRS codes in III-A, we set ˜G = IT ⊗ G and ˜DI = IT ⊗ (GI)−1 for any given set of k responsive
1109
+ servers indexed by I. The index set of blocks requested from the ιth worker to compute is denoted by Jι, and has cardinality w.
1110
+ The workers’ encoding steps correspond to
1111
˜G · (Â−1)⊤ = (IT ⊗ G) · [ Â⊤_1 ; … ; Â⊤_k ] = [ Σ_{j∈J1} pj(β1) · Â⊤_j ; … ; Σ_{j∈Jn} pj(βn) · Â⊤_j ]    (14)
1148
+ which are carried out locally, once they have computed their assigned tasks. We denote the encoding of the ιth worker by Wι ∈
1149
C^{T×N}, i.e. Wι = Σ_{j∈Jι} pj(βι) · Â⊤_j, which are sent to the coordinator. The received encoded computations by any distinct k servers indexed by I, constitute ˜GI · (Â−1)⊤.
1155
Lemma 1 implies that as long as k workers respond, the approximation Â−1 is recoverable. Moreover, the decoding step
1157
+ reduces to a matrix multiplication of k × k matrices. Applying HI to a square matrix can be done in O(k2 log k), through the
1158
+ FFT algorithm. The prevailing computation in our decoding, is applying P−1. The decoding step is
1159
˜DI · ( ˜GI · (Â−1)⊤ ) = ( IT ⊗ (GI)−1 ) · ( IT ⊗ GI ) · (Â−1)⊤ = (IT · IT) ⊗ ( (GI)−1 · GI ) · (Â−1)⊤ = IT ⊗ Ik · (Â−1)⊤ = (Â−1)⊤
1182
+ and our scheme is valid.
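A minimal round-trip sketch of the encoding (14) and this decoding step is given below. For brevity a plain Vandermonde generator stands in for a BRS generator — any generator satisfying the MDS theorem works here (cf. Theorem 6) — and the Kronecker factor order is chosen to match the block stacking used in the sketch; all sizes are illustrative.

import numpy as np

rng = np.random.default_rng(3)
N, k, n = 8, 4, 6
T = N // k

A_inv_hat = rng.standard_normal((N, N))            # stand-in for the computed approximation
blocks = np.hsplit(A_inv_hat, k)                   # hat{A}_1, ..., hat{A}_k, each N x T

pts = np.exp(2j * np.pi * np.arange(1, n + 1) / (n + 1))
G = np.vander(pts, N=k, increasing=True)           # illustrative n x k MDS generator

# Worker i's encoding W_i = sum_j G[i, j] * blocks[j]^T  (cf. (14))
W = [sum(G[i, j] * blocks[j].T for j in range(k)) for i in range(n)]

# Coordinator: any k responses suffice; decode block-wise with (G_I)^{-1}.
I = [0, 2, 3, 5]
stacked = np.vstack([W[i] for i in I])             # (kT) x N encoded computations
D = np.kron(np.linalg.inv(G[I, :]), np.eye(T))     # decoder matching this block stacking
decoded = (D @ stacked).real
print(np.allclose(decoded, A_inv_hat.T))           # expected: True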
1183
+ The above CCM therefore has a linear encoding done locally by the servers (14), is MDS since s = d − 1, and its decoding
1184
step reduces to computing and applying G_I^{−1} (Lemma 1). The security of the encodings relies on the secrecy of B, which was sent from the coordinator to the workers. For an additional security layer, the interpolation points of B could instead be defined as βj = β^{π(j)}, for π ∈ Sn a PRP. In this case, π−1 would also need to be securely broadcasted.
1189
1219
+ Fig. 4. Comparison of decoding complexity, when naive matrix inversion is used (so O(k3)) compared to the decoding step implied by Lemma 1, for n = 200
1220
+ and varying s. We also provide a logarithmic scale comparison.
1221
+ With the above framework, any sparsest-balanced generator MDS matrix [23] would suffice, as long as it satisfies the MDS
1222
theorem [35]. By Lemma 1, if we set k = Ω(√N) (similar to [7]), the decoding step could then be done in O(N^{ω/2}) = o(N^{1.187}),
1225
+ which is close to linear in terms of N.
1226
+ Theorem 6. Let G ∈ Fn×k be a generator matrix of any MDS code over F, for which ∥G(j)∥0 = n − k + 1 and ∥G(i)∥0 = w
1227
+ for all (i, j) ∈ Nn × Nk. By utilizing Algorithm 1, we can devise a linear MDS coded matrix inversion scheme; through the
1228
+ encoding-decoding pair ( ˜G, ˜DI).
1229
+ Proof. The encoding coefficients applied locally by each of the n workers correspond to a row of G. The encodings of all the
1230
workers then correspond to ˜G · (Â−1)⊤, for ˜G = IT ⊗ G, as in (14). Consider any set of responsive workers I of size k, whose encodings constitute ˜GI · (Â−1)⊤. By the MDS theorem, GI is invertible. Hence, the decoding step reduces to inverting GI; i.e.
1234
+ ˜DI = IT ⊗ (GI)−1, and is performed online.
1235
+
1236
+ Constructions based on cyclic MDS codes, which have been used to devise GC schemes [36], can also be considered. These
1237
+ encoding matrices are not sparsest-balanced, which makes them suitable when considering heterogeneous workers.
1238
+ Proposition 7. Any cyclic [n, k] MDS code C over F ∈ {R, C} can be used to devise a coded matrix inversion encoding-decoding
1239
+ pair ( ˜G, ˜DI).
1240
+
1241
1242
+ Proof. Consider a cyclic [n, n − s] MDS code C over F ∈ {R, C}. Recall that from our assumptions, we have s = n − k. By [36,
1243
+ Lemma 8], there exists a codeword g1 ∈ C of support d = s + 1, i.e. ∥g1∥0 = d. Since C is cyclic, it follows that the cyclic shifts
1244
of g1 also lie in C. Denote the n − 1 consecutive cyclic shifts of g1 by {gi}_{i=2}^n ⊊ C ⊊ F^{1×n}, which are all distinct. Define the cyclic matrix

Ḡ := [ g1⊤  g2⊤  · · ·  gn⊤ ] ∈ F^{n×n}.
1265
+ Since ∥gi∥0 = d and gi is a cyclic shift of gi−1 for all i > 1, it follows that ∥ ¯G(i)∥0 = ∥ ¯G(j)∥0 = d for all i, j ∈ Nn, i.e. ¯G
1266
+ is sparsest and balanced. If we erase any s = n − k columns of ¯G, we get G ∈ Fn×k. By erasing arbitrary columns of ¯G, the
1267
+ resulting G is not balanced, i.e. we have ∥G(i)∥0 ̸= ∥G(j)∥0 for some pairs i, j ∈ Nn. Similar to our construction based on BRS
1268
+ generator matrices, we define the encoding matrix to be ˜G = IT ⊗ G. The local encodings are then analogous to (14).
1269
+ Consider an arbitrary set of k non-straggling workers I ⊊ Nn, and the corresponding matrix GI ∈ Fk×k. By [36, Lemma 12,
1270
+ B4.], GI is invertible. The decoding matrix is then ˜DI = IT ⊗ (GI)−1, and the condition ˜DI ˜G = IN is met.
1271
+
1272
+ D. Optimality of MDS BRS Codes
1273
+ Under the assumption that k = n − s, by utilizing the BRSq[n, k] generator matrices, we achieved the minimum possible
1274
+ communication load from the workers to the coordinator. From our discussion in V-A, we cannot hope to receive an encoding
1275
+ of less than N 2/k symbols; when we require that k workers respond with the same amount of information symbols in order
1276
+ to recover �
1277
+ A−1 ∈ RN×N, unless we make further assumptions on the structure of A and A−1. Each encoding Wι consists
1278
+ of NT = N 2/k symbols, so we have achieved the lower bound on the minimum amount of information needed to be sent to
1279
+ the coordinator. Hence, Wι ∈ CT ×N for any sparsest-balance generator MDS matrix. This also holds true for other generator
1280
+ matrices which can be used in Theorem 6, as the encodings are linear (e.g. Proposition 7).
1281
+ We also require the workers to estimate the least possible number of columns for the given recovery threshold k. For our choice
1282
+ of parameters, the bound of [20, Theorem 1] is met with equality. That is, for all i ∈ Nn:
1283
∥G(i)∥0 = w = (k/n) · d = (k/n) · (n − k + 1) ,
1286
+ which means that for homogeneous workers, we cannot get a sparser generator matrix. This, along with the requirement that GI
1287
+ should be invertible for all possible I, are what we considered in (2).
1288
+ VI. CONCLUSION AND FUTURE WORK
1289
+ In this paper, we addressed the problem of approximate computation of the inverse of a matrix distributively in a FL setting,
1290
+ under the possible presence of straggling workers. We provided approximation error bounds for our approach, as well as security
1291
+ and recovery guarantees. We also provided numerical experiments that validated our proposed approach.
1292
+ There are several interesting future directions. One is looking into the issue of numerical stability of the BRS approach, and
1293
+ exploring other suitable generator matrices, e.g. circulant permutation and rotation matrices [37]. Another direction, is leveraging
1294
+ approximate CCMs. The techniques of [22], [38] suggest that carefully selecting interpolation points may lead to more efficient
1295
+ (approximate) schemes. In terms of coding-theory, it would be interesting to see if it is possible to reduce the complexity of our
1296
+ decoding step. Specifically, could well-known RS decoding algorithms such as the Berlekamp-Welch algorithm be exploited?
1297
+ Another important extension is to reduce the communication rounds when computing the pseudoinverse through our approach.
1298
+ This depends on the CMM which is being utilized, though using different ones for each of the two multiplications may also be
1299
+ beneficial.
1300
Tribute to Alex Vardy: As this is a special issue dedicated to the memory of Alexander Vardy, we mention how this paper relates
1301
+ to some of his work. Even though Alex had not worked on CC, his contributions to RS codes are immense. A focus of this paper
1302
+ is to reduce the decoding complexity of the proposed BRS-based CCM, while in [39] it was shown that ML decoding of RS
1303
+ codes is NP-hard. Another highly innovative work of Vardy’s is [40], in which the ‘Parvaresh-Vardy codes’ were introduced;
1304
+ and the associated list-decoding algorithm was shown to yield an improvement over the Guruswami–Sudan algorithm. This was
1305
+ subsequently improved by Guruswami and Rudra [41], whose techniques were exploited in [42] to introduce list-decoding in CC.
1306
+ APPENDIX A
1307
+ ADDITIONAL MATERIAL AND BACKGROUND
1308
+ In this appendix, we include material and background which was used in our derivations. First, we recall what an ϵ-optimal
1309
+ solution/point is, which was used in the proof of Proposition 4. Next, we state the MDS Theorem and the BCH Bound. We
1310
+ then give a brief overview of the GC scheme from [8], to show how it differs from our coded matrix inversion scheme. We also
1311
+ explicitly give their construction of a balanced mask matrix M ∈ {0, 1}n×k, which we use for the construction of the BRS
1312
+ generator matrices. Lastly, we illustrate a simple example of the encoding matrix.
1313
+
1314
1315
+ Definition 8 ( [43]). A point ¯x is said to be an ϵ-optimal solution/point to a minimization problem with objective function f(x),
1316
+ if for any x, it holds that f(x) ⩾ f(¯x) − ϵ, where ϵ ⩾ 0. When ϵ = 0, an ϵ-optimal solution is an exact minimizer.
1317
+ Theorem 9 (MDS Theorem — [35]). Let C be a linear [n, k, d] code over Fq, with G, H the generator and parity-check matrices.
1318
+ Then, the following are equivalent:
1319
+ 1) C is a MDS code, i.e. d = n − k + 1
1320
+ 2) every set of n − k columns of H is linearly independent
1321
+ 3) every set of k columns of G is linearly independent
1322
+ 4) C⊥ is a MDS code.
1323
+ Theorem 10 (BCH Bound — [15], [25]). Let p(x) ∈ Fq[x]\{0} with t cyclically consecutive roots, i.e. p(αj+ι) = 0 for all
1324
+ ι ∈ Nt. Then, at least t + 1 coefficients of p(x) are nonzero.
1325
+ Algorithm 2: MaskMatrix(n, k, d) [8]
1326
Input: n, k, d ∈ Z+ s.t. n > d, k and w = kd/n
Output: row-balanced mask matrix M ∈ {0, 1}^{n×k}
M ← 0_{n×k}
for j = 0 to k − 1 do
    for i = 0 to d − 1 do
        ι ← (i + jd + 1) mod n
        Mι,j ← 1
1334
+ end
1335
+ end
1336
+ return M
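A direct Python transcription of Algorithm 2 is given below; it reproduces the 9 × 6 example of Subsection A, including the support supp(M(1)) = {1, 2, 4, 5} and the row/column weights w = 4 and d = 6. The 0-indexed assignment is the only departure from the pseudocode.

import numpy as np

def mask_matrix(n, k, d):
    # Column j gets ones in rows (i + j*d) mod n for i = 0, ..., d-1 (0-indexed).
    M = np.zeros((n, k), dtype=int)
    for j in range(k):
        for i in range(d):
            M[(i + j * d) % n, j] = 1
    return M

M = mask_matrix(n=9, k=6, d=6)
print(M)                                  # the 9 x 6 mask of the example below
print(np.flatnonzero(M[0]) + 1)           # supp(M_(1)) = [1 2 4 5]
print(M.sum(axis=1), M.sum(axis=0))       # row weights w = 4, column weights d = 6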
1337
+ Even though this was not pointed out in [8], Algorithm 2 does not always produce a mask matrix of the given parameters when
1338
+ we select d < n/2. This is why in our work we require d ⩾ n/2.
1339
+ The decomposition G = HP is utilized in the GC scheme of [8]. Each column of G corresponds to a partition of the data
1340
+ whose partial gradient is to be computed. The polynomials are judiciously constructed in this scheme, such that the constant term
1341
+ of each polynomial is 1 for all polynomials, thus P(1) = ⃗1. By this, the decoding vector a⊤
1342
+ I is the first row of G−1
1343
+ I , for which
1344
+ a⊤
1345
+ I GI = e⊤
1346
+ 1 . A direct consequence of this is that a⊤
1347
+ I BI = e⊤
1348
+ 1 T = T(1) = ⃗1, which is the objective for constructing a GC
1349
+ scheme.
1350
+ A. Generator Matrix Example
1351
As an example, consider the case where n = 9, k = 6 and d = 6, thus w = kd/n = 4. Then, Algorithm 2 produces
1353
$$ M = \begin{pmatrix}
1 & 1 & 0 & 1 & 1 & 0 \\
1 & 1 & 0 & 1 & 1 & 0 \\
1 & 1 & 0 & 1 & 1 & 0 \\
1 & 0 & 1 & 1 & 0 & 1 \\
1 & 0 & 1 & 1 & 0 & 1 \\
1 & 0 & 1 & 1 & 0 & 1 \\
0 & 1 & 1 & 0 & 1 & 1 \\
0 & 1 & 1 & 0 & 1 & 1 \\
0 & 1 & 1 & 0 & 1 & 1
\end{pmatrix} \in \{0, 1\}^{9 \times 6} . $$
1437
+ For our CCM, this means that the ith worker computes the blocks indexed by supp(M(i)), e.g. supp(M(1)) = {1, 2, 4, 5}. We
1438
+ denote the indices of the respective task allocations by Ji = supp(M(i)). The entries of the generator matrix G are the evaluations
1439
of the constructed polynomials (3) at each of the evaluation points B = {βi}_{i=1}^n, i.e. Gij = pj(βi). This results in:
1441
$$ G = \begin{pmatrix}
p_1(\beta_1) & p_2(\beta_1) & 0 & p_4(\beta_1) & p_5(\beta_1) & 0 \\
p_1(\beta_2) & p_2(\beta_2) & 0 & p_4(\beta_2) & p_5(\beta_2) & 0 \\
p_1(\beta_3) & p_2(\beta_3) & 0 & p_4(\beta_3) & p_5(\beta_3) & 0 \\
p_1(\beta_4) & 0 & p_3(\beta_4) & p_4(\beta_4) & 0 & p_6(\beta_4) \\
p_1(\beta_5) & 0 & p_3(\beta_5) & p_4(\beta_5) & 0 & p_6(\beta_5) \\
p_1(\beta_6) & 0 & p_3(\beta_6) & p_4(\beta_6) & 0 & p_6(\beta_6) \\
0 & p_2(\beta_7) & p_3(\beta_7) & 0 & p_5(\beta_7) & p_6(\beta_7) \\
0 & p_2(\beta_8) & p_3(\beta_8) & 0 & p_5(\beta_8) & p_6(\beta_8) \\
0 & p_2(\beta_9) & p_3(\beta_9) & 0 & p_5(\beta_9) & p_6(\beta_9)
\end{pmatrix} . $$
1525
+
1526
1527
+ APPENDIX B
1528
+ DISTRIBUTED PSEUDOINVERSE
1529
+ For full-rank rectangular matrices A ∈ RN×M where N > M, one resorts to the left Moore–Penrose pseudoinverse A† ∈
+ RM×N, for which A†A = IM. In Algorithm 3, we present how to approximate the left pseudoinverse of A, by using the fact that
+ A† = (A⊤A)−1A⊤, since A⊤A ∈ GLM(R). The right pseudoinverse A† = A⊤(AA⊤)−1 of A ∈ RM×N where M < N,
+ can be obtained by a modification of Algorithm 3.
1533
+ Just like the inverse, the pseudoinverse of a matrix also appears in a variety of applications. Computing the pseudoinverse of
1534
+ A ∈ RN×M for N > M is even more cumbersome, as it requires inverting the Gram matrix A⊤A. For this subsection, we
1535
+ consider a full-rank matrix A.
1536
+ One could naively attempt to modify Algorithm 1 in order to retrieve A† such that A†A = IM, by approximating the rows
+ of A†. This would not work, as the underlying optimization problems would not be strictly convex. Instead, we use Algorithm
+ 3 to estimate the rows of B−1 := (A⊤A)−1, and then multiply the estimate B̂−1 by A⊤. This gives us the approximation
+ Â† = B̂−1 · A⊤.
1543
+ The drawback of Algorithm 3 is that it requires two additional matrix multiplications, A⊤A and B̂−1A⊤. We overcome this
+ barrier by using a CMM scheme twice, to recover Â† in a two or three-round communication CC approach. These are discussed
+ below.
1548
+ Bounds on errF(Â−1) and errrF(Â−1) can be established for both algorithms, specific to the black-box least squares algorithm
+ being utilized. This is left for future work.
1552
+ Algorithm 3: Estimating A†
+ Input: full-rank A ∈ RN×M where N > M
+ B ← A⊤A
+ for i = 1 to M do
+     ĉi ← arg min_{c ∈ R1×M} { gi(c) := ∥cB − e⊤_i∥²_2 }
+     b̂i ← ĉi · A⊤
+ end
+ return Â† ← [ b̂⊤_1 · · · b̂⊤_M ]⊤   ▷ Â†(i) = b̂i
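+ The following Python sketch illustrates Algorithm 3, using plain gradient descent as the black-box least squares solver (the "SD" subroutine referred to in Corollary 11). The step size and the stopping rule are our assumptions, and the loop is written for clarity rather than speed.
+ import numpy as np
+ 
+ def estimate_pinv(A: np.ndarray, eps: float = 1e-8, max_iter: int = 10000) -> np.ndarray:
+     """Sketch of Algorithm 3: estimate the rows of (A^T A)^{-1} by least squares, then multiply by A^T."""
+     N, M = A.shape
+     B = A.T @ A                                    # Gram matrix, invertible for full-rank A with N > M
+     step = 1.0 / (2.0 * np.linalg.norm(B, 2) ** 2) # a safe step size for the quadratic g_i
+     A_pinv_est = np.zeros((M, N))
+     for i in range(M):
+         e_i = np.zeros(M); e_i[i] = 1.0
+         c = np.zeros(M)
+         for _ in range(max_iter):
+             grad = 2.0 * (c @ B - e_i) @ B         # gradient of g_i(c) = ||cB - e_i||_2^2
+             if np.linalg.norm(grad) <= eps:        # termination criterion ||grad g_i|| <= eps
+                 break
+             c = c - step * grad
+         A_pinv_est[i] = c @ A.T                    # b_i = c_i A^T, the i-th row of the estimate
+     return A_pinv_est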
1573
+ Corollary 11. For full-rank A ∈ RN×M with N > M, we have errF(Â†) ⩽ √M ϵ·κ2 / (√2 σmin(A)³) and
+ errrF(Â†) ⩽ √M ϵ·κ2 / (√2 σmin(A)²) when using SD to solve the subroutine optimization problems of Algorithm 3,
+ with termination criteria ∥∇gi(c[t])∥2 ⩽ ϵ.
1585
+ Proof. From (10), it follows that
+ ∥B−1 e_i − ĉ⊤_i∥2 ⩽ (ϵ/√2) / σmin(B)² = (ϵ/√2) / σmin(A)⁴ =: δ .
+ The above bound implies that for each summand of the Frobenius error, ∥b̂i − A†(i)∥2 = ∥ĉi A⊤ − e⊤_i · B−1A⊤∥2, we have
+ ∥b̂i − A†(i)∥2 ⩽ δ∥A⊤∥2. Summing the right hand side M times, we get that
+ errF(Â†)² ⩽ M · (δ∥A⊤∥2)² = M ϵ² · σmax(A)² / (2 σmin(A)⁸) = M ϵ² · κ2² / (2 σmin(A)⁶) .
+ By taking the square root, we have shown the first claim.
+ Since 1/σmin(A) = ∥A†∥2 ⩽ ∥A†∥F , it then follows that
+ errrF(Â†) = errF(Â†) / ∥A†∥F ⩽ errF(Â†) / ∥A†∥2 ⩽ √M ϵ·κ2 / (√2 σmin(A)²) ,
+ which completes the proof.
1623
+
1624
+ A. Pseudoinverse from Polynomial CMM
1625
+ One approach to leverage Algorithm 3 in a two-round communication scheme is to first compute B = A⊤A through a CMM
+ scheme, then share B with all the workers who estimate the rows of B̂−1, and finally use another CMM to locally encode the
+ estimated columns with blocks of A⊤, to recover Â† = B̂−1 · A⊤. Even though there are only two rounds of communication, the
+ fact that we have a local encoding by the workers results in a higher communication load overall. An alternative approach which
+ circumvents this issue uses three rounds of communication.
1635
+ For this approach, we use the polynomial CMM scheme from [7] twice, along with our coded matrix inversion scheme. This
+ CMM has a reduced communication load, and minimal computation is required by the workers. To have a consistent recovery
+ threshold across our communication rounds, we partition A as in (11) into k̄ = √(n − s) = √k blocks. Each block is of size
+ N × T̄, for T̄ = M/k̄. The encodings from [7] of the partitions {Aj}^k̄_{j=1}, for carefully selected parameters a, b ∈ Z+ and distinct
+ elements γi ∈ Fq, are
1644
+ Ã^a_i = ∑_{j=1}^{k̄} Aj γ_i^{(j−1)a}   and   Ã^b_i = ∑_{j=1}^{k̄} Aj γ_i^{(j−1)b}
1659
+ for each worker indexed by i. Thus, each encoding is comprised of N·T̄ symbols. The workers compute the product of their
+ respective encodings (Ã^a_i)⊤ · Ã^b_i. The decoding step corresponds to an interpolation step, which is achievable when k̄² = k many
+ workers respond⁵, which is the optimal recovery threshold for CMM. Any fast polynomial interpolation or RS decoding algorithm
+ can be used for this step, to recover B.
1665
+ Next, the master shares B with all the workers (from V-A, this is necessary), who are requested to estimate the column-blocks
+ of B̂−1
+ B̂−1 = [ B̄1 · · · B̄k̄ ]   where B̄j ∈ RM×T̄ ∀j ∈ Nk̄   (15)
+ according to Algorithm 1. We can then recover B̂−1 by our BRS based scheme, once k workers send their encoding.
1677
+ For the final round, we encode B̂−1 as
+ B̃^a_i = ∑_{j=1}^{k̄} B̄j γ_i^{(j−1)a}
+ which are sent to the respective workers. The workers already have in their possession the encodings Ã^b_i. We then carry out the
+ polynomial CMM where each worker is requested to send back (B̃^a_i)⊤ · Ã^b_i. The master server can then recover Â†.
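+ To make the round structure above concrete, the following toy numpy sketch illustrates the first CMM round (recovering B = A⊤A). The scheme of [7] is normally stated over a finite field; working over the reals, the function name, the choice a = 1, b = k̄, and the Vandermonde-solve decoder are our illustrative assumptions, not the paper's implementation.
+ import numpy as np
+ 
+ def poly_cmm_gram(A, kbar, gammas):
+     """Toy polynomial-CMM round computing B = A^T A from kbar^2 worker responses (real-valued)."""
+     N, M = A.shape
+     T = M // kbar
+     blocks = [A[:, j*T:(j+1)*T] for j in range(kbar)]            # column partition of A
+     enc_a = [sum(blocks[j] * g**j        for j in range(kbar)) for g in gammas]
+     enc_b = [sum(blocks[j] * g**(j*kbar) for j in range(kbar)) for g in gammas]
+     products = [enc_a[i].T @ enc_b[i] for i in range(len(gammas))]  # what each worker sends back
+     t = kbar * kbar                                              # recovery threshold kbar^2
+     V = np.vander(np.array(gammas[:t]), t, increasing=True)      # interpolation as a Vandermonde solve
+     flat = np.stack([p.reshape(-1) for p in products[:t]])
+     coeffs = np.linalg.solve(V, flat)                            # coefficient j + l*kbar equals A_j^T A_l
+     B = np.zeros((M, M))
+     for l in range(kbar):
+         for j in range(kbar):
+             B[j*T:(j+1)*T, l*T:(l+1)*T] = coeffs[j + l*kbar].reshape(T, T)
+     return B
+ 
+ A = np.random.randn(8, 4)
+ B = poly_cmm_gram(A, kbar=2, gammas=[1.0, 2.0, 3.0, 4.0, 5.0])   # 5 workers, threshold 4: one straggler tolerated
+ print(np.abs(B - A.T @ A).max())                                  # small numerical error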
1692
+ Theorem 12. Consider G ∈ Fn×k as in Theorem 6. By using any CMM, we can devise a matrix pseudoinverse CCM by utilizing
+ Algorithm 3, in two rounds of communication. By using polynomial CMM [7], we achieve this with a reduced communication
+ load and minimal computation, in three rounds of communication.
1695
+ REFERENCES
1696
+ [1] N. Charalambides, M. Pilanci, and A. O. Hero, “Secure Linear MDS Coded Matrix Inversion,” in 2022 58th Annual Allerton Conference on Communication,
1697
+ Control, and Computing (Allerton), 2022, pp. 1–8.
1698
+ [2] B. G. Greenberg and A. E. Sarhan, “Matrix inversion, its interest and application in analysis of data,” Journal of the American Statistical Association, vol. 54,
1699
+ no. 288, pp. 755–766, 1959.
1700
+ [3] N. J. Higham, Accuracy and Stability of Numerical Algorithms, 2nd ed.
1701
+ USA: Society for Industrial and Applied Mathematics, 2002.
1702
+ [4] K. Lee, M. Lam, R. Pedarsani, D. Papailiopoulos, and K. Ramchandran, “Speeding up distributed machine learning using codes,” IEEE Transactions on
1703
+ Information Theory, vol. 64, no. 3, pp. 1514–1529, 2017.
1704
+ [5] Q. Yu, S. Li, N. Raviv, S. M. M. Kalan, M. Soltanolkotabi, and S. Avestimehr, “Lagrange Coded Computing: Optimal design for resiliency, security and
1705
+ privacy,” arXiv preprint arXiv:1806.00939, 2018.
1706
+ [6] S. Li and S. Avestimehr, “Coded Computing,” Foundations and Trends® in Communications and Information Theory, vol. 17, no. 1, 2020.
1707
+ [7] Q. Yu, M. Maddah-Ali, and S. Avestimehr, “Polynomial codes: an optimal design for high-dimensional coded matrix multiplication,” in Advances in Neural
1708
+ Information Processing Systems, 2017, pp. 4403–4413.
1709
+ [8] W. Halbawi, N. Azizan, F. Salehi, and B. Hassibi, “Improving Distributed Gradient Descent Using Reed-Solomon Codes,” in 2018 IEEE International
1710
+ Symposium on Information Theory (ISIT).
1711
+ IEEE, 2018, pp. 2027–2031.
1712
+ [9] S. Dhakal, S. Prakash, Y. Yona, S. Talwar, and N. Himayat, “Coded Federated Learning,” in 2019 IEEE Globecom Workshops (GC Wkshps).
1713
+ IEEE, 2019,
1714
+ pp. 1–6.
1715
+ [10] S. Prakash, S. Dhakal, M. R. Akdeniz, Y. Yona, S. Talwar, S. Avestimehr, and N. Himayat, “Coded Computing for Low-Latency Federated Learning over
1716
+ Wireless Edge Networks,” IEEE Journal on Selected Areas in Communications, vol. 39, no. 1, pp. 233–250, 2020.
1717
+ [11] R. Schlegel, S. Kumar, E. Rosnes, and A. G. i. Amat, “CodedPaddedFL and CodedSecAgg: Straggler Mitigation and Secure Aggregation in Federated
1718
+ Learning,” arXiv e-prints, pp. arXiv–2112, 2021.
1719
+ [12] S. Kumar, R. Schlegel, E. Rosnes, and A. G. i. Amat, “Coding for Straggler Mitigation in Federated Learning,” arXiv preprint arXiv:2109.15226, 2021.
1720
+ [13] M. Xhemrishi, A. G. i. Amat, E. Rosnes, and A. Wachter-Zeh, “Computational Code-Based Privacy in Coded Federated Learning,” arXiv preprint
1721
+ arXiv:2202.13798, 2022.
1722
+ [14] S. Ha, J. Zhang, O. Simeone, and J. Kang, “Coded Federated Computing in Wireless Networks with Straggling Devices and Imperfect CSI,” in 2019 IEEE
1723
+ International Symposium on Information Theory (ISIT), 2019, pp. 2649–2653.
1724
+ [15] W. Halbawi, Z. Liu, and B. Hassibi, “Balanced Reed-Solomon Codes,” in 2016 IEEE International Symposium on Information Theory (ISIT).
1725
+ IEEE, 2016,
1726
+ pp. 935–939.
1727
+ [16] ——, “Balanced Reed-Solomon Codes for all parameters,” in 2016 IEEE Information Theory Workshop (ITW).
1728
+ IEEE, 2016, pp. 409–413.
1729
+ [17] N. Charalambides, H. Mahdavifar, and A. O. Hero, “Numerically Stable Binary Gradient Coding,” arXiv preprint arXiv:2001.11449, 2020.
1730
+ [18] J. Koneˇcn`y, H. B. McMahan, D. Ramage, and P. Richtárik, “Federated optimization: Distributed machine learning for on-device intelligence,” arXiv preprint
1731
+ arXiv:1610.02527, 2016.
1732
+ 5 We select k̄ = √k in the partitioning of A in (11) when deploying this CMM, to attain the same recovery threshold as our inversion scheme.
1735
+
1736
1737
+ [19] Y. Yang, P. Grover, and S. Kar, “Coded distributed computing for inverse problems,” in Advances in Neural Information Processing Systems, vol. 30. Curran
1738
+ Associates, Inc., 2017, pp. 709–719.
1739
+ [20] R. Tandon, Q. Lei, A. G. Dimakis, and N. Karampatziakis, “Gradient coding: Avoiding stragglers in distributed learning,” in International Conference on
1740
+ Machine Learning, 2017, pp. 3368–3376.
1741
+ [21] M. Soleymani, H. Mahdavifar, and A. S. Avestimehr, “Analog Lagrange Coded Computing,” IEEE Journal on Selected Areas in Information Theory, vol. 2,
1742
+ no. 1, pp. 283–295, 2021.
1743
+ [22] S. Kiani and S. C. Draper, “Successive Approximation Coding for Distributed Matrix Multiplication,” arXiv preprint arXiv:2201.03486, 2022.
1744
+ [23] S. H. Dau, W. Song, Z. Dong, and C. Yuen, “Balanced Sparsest Generator Matrices for MDS Codes,” in 2013 IEEE International Symposium on Information
1745
+ Theory, 2013, pp. 1889–1893.
1746
+ [24] M. Krause, “A Simple Proof of the Gale-Ryser Theorem,” The American Mathematical Monthly, vol. 103, no. 4, pp. 335–337, 1996.
1747
+ [25] R. J. McEliece, Theory of Information and Coding, 2nd ed.
1748
+ USA: Cambridge University Press, 2001.
1749
+ [26] J. Alman and V. V. Williams, “A refined laser method and faster matrix multiplication,” arXiv preprint arXiv:2010.05846, 2020.
1750
+ [27] Å. Björck and V. Pereyra, “Solution of Vandermonde Systems of Equations,” Mathematics of Computation, vol. 24, pp. 893–903, 1970.
1751
+ [28] S. P. Boyd and L. Vandenberghe, Convex optimization.
1752
+ Cambridge university press, 2004.
1753
+ [29] J. R. Shewchuk, “An Introduction to the Conjugate Gradient Method Without the Agonizing Pain,” 1994.
1754
+ [30] L. N. Trefethen and D. Bau III, Numerical linear algebra.
1755
+ Siam, 1997, vol. 50.
1756
+ [31] S. Bubeck, “Convex optimization: Algorithms and complexity,” Foundations and Trends® in Machine Learning, vol. 8, no. 3-4, pp. 231–357, 2015.
1757
+ [Online]. Available: http://dx.doi.org/10.1561/2200000050
1758
+ [32] R. M. Gower, “Sketch and Project: Randomized Iterative Methods for Linear Systems and Inverting Matrices,” arXiv preprint arXiv:1612.06013, 2016.
1759
+ [33] T. M. Cover and J. A. Thomas, Elements of Information Theory (Wiley Series in Telecommunications and Signal Processing).
1760
+ USA: Wiley-Interscience,
1761
+ 2006.
1762
+ [34] A. Shamir, “How to Share a Secret,” Communications of the ACM, vol. 22, no. 11, pp. 612–613, 1979.
1763
+ [35] S. Ling and C. Xing, Coding Theory: A First Course.
1764
+ Cambridge University Press, 2004.
1765
+ [36] N. Raviv, I. Tamo, R. Tandon, and A. G. Dimakis, “Gradient Coding from Cyclic MDS Codes and Expander Graphs,” IEEE Transactions on Information
1766
+ Theory, vol. 66, no. 12, pp. 7475–7489, 2020.
1767
+ [37] A. Ramamoorthy and L. Tang, “Numerically stable coded matrix computations via circulant and rotation matrix embeddings,” IEEE Transactions on
1768
+ Information Theory, vol. 68, no. 4, pp. 2684–2703, 2021.
1769
+ [38] H. Jeong, A. Devulapalli, V. R. Cadambe, and F. P. Calmon, “ϵ-Approximate Coded Matrix Multiplication Is Nearly Twice as Efficient as Exact
1770
+ Multiplication,” IEEE Journal on Selected Areas in Information Theory, vol. 2, no. 3, pp. 845–854, 2021.
1771
+ [39] V. Guruswami and A. Vardy, “Maximum-Likelihood Decoding of Reed-Solomon Codes is NP-hard,” IEEE Transactions on Information Theory, vol. 51,
1772
+ no. 7, pp. 2249–2256, 2005.
1773
+ [40] F. Parvaresh and A. Vardy, “Correcting Errors Beyond the Guruswami-Sudan Radius in Polynomial Time,” in 46th Annual IEEE Symposium on Foundations
1774
+ of Computer Science (FOCS’05).
1775
+ IEEE, 2005, pp. 285–294.
1776
+ [41] V. Guruswami and A. Rudra, “Explicit codes achieving list decoding capacity: Error-correction with optimal redundancy,” IEEE Transactions on Information
1777
+ Theory, vol. 54, no. 1, pp. 135–150, 2008.
1778
+ [42] M. Soleymani, R. E. Ali, H. Mahdavifar, and A. S. Avestimehr, “List-decodable coded computing: Breaking the adversarial toleration barrier,” IEEE Journal
1779
+ on Selected Areas in Information Theory, vol. 2, no. 3, pp. 867–878, 2021.
1780
+ [43] F. Bai, Z. Wu, and D. Zhu, “Sequential Lagrange multiplier condition for ϵ-optimal solution in convex programming,” Optimization, vol. 57, no. 5, pp.
1781
+ 669–680, 2008.
1782
+
9NE1T4oBgHgl3EQf7wX0/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
9NE2T4oBgHgl3EQflwcz/content/tmp_files/2301.03991v1.pdf.txt ADDED
@@ -0,0 +1,1075 @@
1
+ Constraining cosmological parameters from
2
+ N-body simulations with Variational Bayesian
3
+ Neural Networks
4
+ H´ector J. Hort´ua 1,2,∗, Luz ´Angela Garc´ıa 3 and Leonardo Casta˜neda C. 4
5
+ 1 Grupo Signos, Departamento de Matem´aticas, Universidad el Bosque, Bogot´a,
6
+ Colombia.
7
+ 2 Maestr´ıa en Ciencia de Datos, Universidad Escuela Colombiana de Ingenier´ıa
8
+ Julio Garavito Bogot´a, Colombia.
9
+ 3 Universidad ECCI. Cra. 19 No. 49-20, Bogot´a, Colombia, C´odigo Postal 111311.
10
+ 4Observatorio Astron´omico Nacional, Universidad Nacional de Colombia, Bogot´a,
11
+ Colombia.
12
+ Correspondence*:
13
+ -
14
+ hjhortuao@unal.edu.co
15
+ ABSTRACT
16
+ Deep Learning methods have recently been applied to astrophysical parameter recovery thanks to
+ their ability to capture information from complex data. One of these methods is approximate Bayesian
+ Neural Networks (BNNs), which have been shown to yield consistent posterior distributions over the
+ parameter space, and are therefore useful for uncertainty quantification. However, like most modern
+ neural networks, BNNs tend to produce overly confident uncertainty estimates and can introduce bias
+ when applied to data. In this work, we implement multiplicative normalizing flows (MNFs), a family
+ of approximate posteriors for the parameters of BNNs, with the purpose of enhancing the flexibility
+ of the variational posterior distribution, and use them to extract Ωm, h, and σ8 from the QUIJOTE
+ simulations. We compare this method against standard BNNs and the flipout estimator. We find that
+ MNFs combined with BNNs outperform the other models, obtaining predictive performance almost one
+ order of magnitude better than standard BNNs, σ8 extracted with high accuracy (r2 = 0.99), and precise
+ uncertainty estimates. The latter implies that MNFs provide a more realistic predictive distribution, closer
+ to the true posterior, mitigating the bias introduced by the variational approximation and allowing us to
+ work with well-calibrated networks.
31
+ Keywords: cosmology, N-body simulations, parameter estimation, artificial intelligence, deep neural networks
32
+ 1
33
+ INTRODUCTION
34
+ Cosmological simulations offer one of the most powerful ways to understand the initial conditions of
35
+ the Universe and improve our knowledge on fundamental physics (1). They open also the possibility to
36
+ fully explore the growth of structure in both the linear and non-linear regime. Currently, the concordance
37
+ cosmological model, Λ-CDM, gives an accurate description of most of the observations from early to late
38
+ stages of the Universe using a set of few parameters (2). Recent observations from Cosmic Microwave
39
+ Background (CMB) have provided such accurate estimation for the cosmological parameters, and prompted
40
+ a tension with respect to local scales measurement, along with a well-known degeneracy on the total
41
+ non-relativistic matter density parameters (3, 4, 5). Conventionally, the way to capture information from
42
47
+ astronomical observations is to compare summary statistics from data against theory predictions. However,
48
+ two major difficulties arise: First, it is not well understood what kind of estimator, or at which degree of
49
+ approximation of order statistic should be better to extract the maximum information from observations. In
50
+ fact, the most common choice is the power spectrum(PS) which has shown to be a powerful tool for making
51
+ inference (2). However, It is well known that PS is not able to fully characterize the statistical properties
52
+ of non-Gaussian density fields, yielding that it would not be suitable for upcoming Large Scale Structure
53
+ (LSS) or 21-cm signals which are highly non-Gaussian (6, 7, 8). Then, PS will miss relevant information if
54
+ only this statistic is used for parameter recovery (9). Second, Cosmologists will require to store and process
55
+ a large number of data, which can be very expensive. Clearly, sophisticated computational tools along with
56
+ new perspectives on data collection, storage, and analysis must be developed in order to interpret these
57
+ observations (10).
58
+ In recent years, artificial intelligence (AI), and Deep Neural Networks (DNNs) have emerged as promising
59
+ tools to tackle the aforementioned difficulties in the cosmological context due to its capability for
60
+ learning relationships between variables in complex data, outperform traditional estimators, and handle
61
+ the demanding computational needs in Astrophysics and Cosmology (10). These standard DNNs have
62
+ been used on a variety of tasks because of their potential for solving inverse problems. However, they
63
+ are prone to overfitting due to the excessive number of parameters to be adjusted, and the lack of
64
+ explanations of their predictions for given instances (11). The latter is crucial for cosmological analysis
65
+ where assessing robustness and reliability of the model predictions are imperative. This problem can be
66
+ addressed by endowing DNNs with probabilistic properties that permit quantifying posterior distributions
67
+ on their outcomes, and provide them with predictive uncertainties. One of these approaches is the
68
+ use of Bayesian Neural Networks (BNNs) comprised of probabilistic layers that capture uncertainty
69
+ over the network parameters (weights), and trained using Bayesian inference (12). Several works have
70
+ utilized BNNs in cosmological scenarios where the combination of DNNs (through Convolutional Neural
71
+ Networks, CNNs) and probabilistic properties, allow to build models adapted to non-Gaussian data
72
+ without requiring a priori choice summary statistic (9, 13, 14, 15), along with quantifying predictive
73
+ uncertainties (16, 17, 18, 19, 20, 21). Indeed, BNNs permit to infer posterior distributions instead of point
74
+ estimates for the weights. These distributions capture the parameter uncertainty, and by subsequently
75
+ integrating over them, we acquire uncertainties related to the network outputs. Nevertheless, obtaining the
76
+ posterior distributions is an intractable task, and approximate techniques such as a Variational Inference(VI)
77
+ must be used in order to put them into practice (22). Despite the approximate posterior distribution over
78
+ the weights employed in VI clearly providing fast computations for inference tasks, they can also introduce
79
+ a degree of bias depending on how complex(or simple) the choice of the approximate distribution family
80
+ is (23). This issue yields overconfident uncertainty predictions and an unsatisfactory closeness measurement
81
+ with respect to the true posterior. In (17, 18), the authors included normalizing flows on the top of BNNs to
82
+ give the joint parameter distribution more flexibility. However, that approach is not implemented into the
83
+ Bayesian framework, preserving the bias.
84
+ In this paper, we attempt to enhance the flexibility of the approximate posterior distribution over the
85
+ weights of the network by employing multiplicative normalizing flows, resulting in accurate and precise
86
+ uncertainty estimates provided by BNNs. We apply this approach to N-body simulations taken from
87
+ QUIJOTE dataset (24) in order to show how BNNs can take not only advantage of non-Gaussian signals
88
+ without requiring a specifying the summary statistic (such as PS) but also, increase the posterior complexity,
89
+ as they yield much larger performance improvements. This paper is organized as follows. Section 2 offers a
90
+ summary of the BNNs framework and a detailed description of Normalizing flow implementation. Section 3
91
+ describes the dataset and analysis tools used in this paper. Numerical implementation and configuration for
92
96
+ BNNs are described in Section 4. Section 5 presents the results we obtained by training BNNs taking into
97
+ account different approaches and we display the inference of cosmological parameters. It also outlines the
98
+ calibration diagrams to determine the accuracy of the uncertainty estimates. Finally, Section 6 draws the
99
+ main conclusions of this work and possible further directions to the use of BNNs in Cosmology.
100
+ 2
101
+ VARIATIONAL BAYESIAN NEURAL NETWORKS
102
+ Here we go into detail about Bayesian Neural Networks (BNNs), and their implementation to perform
103
+ parameter inference. We start with a brief introduction, before focusing on improving the variational
104
+ approximation. We remind the reader to refer to (25, 26, 22) for further details.
105
+ 2.1
106
+ Approximate BNNs
107
+ The goal of BNNs is to infer the posterior distribution p(w|D) over the weights w of the network after
108
+ observing the data D = (X, Y ). This posterior can be obtained from Bayes law: p(w|D) ∼ p(D|w)p(w),
109
+ given a likelihood function p(D|w), and a prior on the weights p(w). Once the posterior has been computed,
110
+ the probability distribution on a new test example x∗ is given by
111
+ p(y∗|x∗, D) = ∫_w p(y∗|x∗, w) p(w|D) dw ,   (1)
116
+ where p(y∗|x∗, w) is the predictive distribution for a given value of the weights. For neural networks,
117
+ however, computing the exact posterior is intractable, so one must resort to approximate BNNs for
118
+ inference (26). A popular method to approximate the posterior is variational inference(VI) (22). Let
119
+ q(w|θ) be a family of simple distributions parameterized by θ. So, the goal of VI is to select a distribution
+ q(w|θ∗) such that θ∗ minimizes KL[q(w|θ) ∥ p(w|D)], being KL[·∥·] the Kullback-Leibler divergence.
126
+ This minimization is equivalent to maximizing the evidence lower bound (ELBO) (26)
127
+ ELBO(θ) = Eq(w|θ)[ log p(Y |X, w) ] − KL[ q(w|θ) ∥ p(w) ] ,   (2)
138
+ where Eq(w|θ)[log p(Y |X, w)] is the expected log-likelihood with respect to the variational posterior and
139
+ KL[q(w|θ)||p(w)] is the divergence of the variational posterior from the prior. We can observe from Eq. 2
140
+ that the KL divergence acts as a regularizer that encourages the variational posterior to move towards
141
+ the modes of the prior. A common choice for the variational posterior is a product of independent (i.e.,
142
+ mean-field) Gaussian distributions, one distribution for each parameter w in the network (25)
143
+ q(w|θ) = ∏_{ij} N(wij ; µij , σ²ij)   (3)
149
+ being i and j the indices of the neurons from the previous layer and the current layer respectively. Applying
150
+ the reparametrization trick we arrive at wij = µij + σij ∗ ϵij, where ϵij is drawn from a standard normal
151
+ distribution. Furthermore, if the prior is also a product of independent Gaussians, the KL divergence
+ between the prior and the variational posterior can be computed analytically, which makes this approach
153
+ computationally efficient.
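+ As a minimal illustration of the mean-field choice above (not the code used in this work), the following TensorFlow snippet samples one weight matrix with the reparametrization trick and evaluates the closed-form KL term of Eq. 2 for a standard-normal prior; the shapes and the softplus parameterization of σ are our assumptions.
+ import tensorflow as tf
+ 
+ mu  = tf.Variable(tf.zeros([64, 3]))           # variational means, one per weight
+ rho = tf.Variable(tf.fill([64, 3], -3.0))      # unconstrained; sigma = softplus(rho) > 0
+ 
+ def sample_weights():
+     sigma = tf.nn.softplus(rho)
+     eps = tf.random.normal(tf.shape(mu))       # eps ~ N(0, 1)
+     return mu + sigma * eps                    # w_ij = mu_ij + sigma_ij * eps_ij
+ 
+ def kl_to_standard_normal():
+     sigma = tf.nn.softplus(rho)
+     # closed-form KL[ N(mu, sigma^2) || N(0, 1) ], summed over all weights
+     return tf.reduce_sum(0.5 * (tf.square(sigma) + tf.square(mu) - 1.0) - tf.math.log(sigma))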
154
+ 2.1.1
155
+ Flipout
156
+ When the samples drawn from q(w|θ) are not fully independent across the examples in a mini-batch, we
+ will obtain gradient estimates with high variance. The Flipout method provides an alternative that decorrelates the
158
162
+ gradients within a mini batch by implicitly sampling pseudo-independent weights for each example (27).
163
+ The method requires two assumptions about the properties of q(w|θ): symmetric with respect to zero,
164
+ and the weights of the network are independent. Under these assumptions, the distribution is invariant to
165
+ element wise multiplication by a random sign matrix ˆr, i.e., ˆw = w◦ˆr, implies that w ∼ q(w) ≈ ˆw ∼ ˆq( ˆw).
166
+ Therefore, the marginal distribution over gradients computed for individual examples will be identical to
167
+ the distribution computed using shared weights samples. Hence, Flipout achieves much lower variance
168
+ updates when averaging over a mini batch. We validate this approach experimentally by comparing against
169
+ Multiplicative normalizing flows.
170
+ 2.2
171
+ Uncertainty in BNNs
172
+ BNNs offer a framework to incorporate, from the posterior distribution, both the uncertainty inherent to
+ the data (aleatoric uncertainty) and the uncertainty in the model parameters due to a limited amount of
+ training data (epistemic uncertainty) (28). Following (16), assume that the top of the BNN consists of a
+ mean vector µ ∈ RN and a covariance matrix Σ ∈ RN(N+1)/2 (footnote 1), and that for a given fixed input x∗, T forward
+ passes of the network are computed, obtaining for each of them a mean µt and covariance matrix Σt. Then,
+ an estimator to approximate the predictive covariance can be written as
178
+
179
+ Cov(y∗, y∗|x∗) ≈ (1/T) ∑_{t=1}^{T} Σt  [Aleatoric]  +  (1/T) ∑_{t=1}^{T} (µt − µ̄)(µt − µ̄)⊤  [Epistemic] ,   (4)
+ with µ̄ = (1/T) ∑_{t=1}^{T} µt. Notice that in case Σ is diagonal, and σ² = diag(Σ), the last equation reduces to the
205
+ results obtained in (29, 30)
206
+
207
+ Var(y∗|x∗) ≈ (1/T) ∑_{t=1}^{T} σ²t  [Aleatoric]  +  (1/T) ∑_{t=1}^{T} (µt − µ̄)²  [Epistemic] .   (5)
230
+ In this scenario, BNNs can be used to learn the correlations between the targets and produce estimates
231
+ of their uncertainties. Unfortunately, the uncertainty computed from Eqs. 4, 5, tends to be miscalibrated, i.e.,
232
+ the predicted uncertainty (taking into account both epistemic and aleatoric uncertainty) is underestimated
233
+ and does not allow robust detection of uncertain predictions at inference. Therefore, calibration diagrams
234
+ along with methods to jointly calibrate aleatoric and epistemic uncertainties, must be employed before
235
+ inferring predictions from BNNs (31). We come back to this point in Section 5.
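+ A small numpy sketch of Eq. 4 (and of Eq. 5 when only diagonal covariances are kept), assuming the T stochastic forward passes have already been collected into arrays; the function name and shapes are illustrative.
+ import numpy as np
+ 
+ def predictive_uncertainty(means, covs):
+     """Combine T forward passes: means has shape (T, N), covs has shape (T, N, N)."""
+     T = means.shape[0]
+     mu_bar = means.mean(axis=0)
+     aleatoric = covs.mean(axis=0)                        # first term of Eq. 4
+     diff = means - mu_bar
+     epistemic = np.einsum('ti,tj->ij', diff, diff) / T   # second term of Eq. 4
+     return mu_bar, aleatoric + epistemic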
236
+ 2.3
237
+ Multiplicative normalizing flows
238
+ As mentioned previously, the most common family for the variational posterior used in BNNs is the
239
+ mean-field Gaussian distributions defined in Eq. 3. This simple distribution is unable to capture the
240
+ complexity of the true posterior. Therefore, we expect that by increasing the complexity of the variational
+ posterior, BNNs achieve significant performance gains, since we are then able to sample from a complicated
+ distribution that more closely resembles the true posterior. Of course, transforming the variational posterior
+ must still allow fast computations and remain numerically tractable. We now describe in detail
244
+ the Multiplicative Normalizing Flows (MNFs) method that provides flexible posterior distributions in an
245
+ 1 Where the targets y ∈ RN.
246
250
+ efficient way by employing auxiliary random variables and normalizing flows proposed by (32). MNFs
251
+ propose that the variational posterior can be expressed as an infinite mixture of distributions
252
+ q(w|θ) = ∫ q(w|z, θ) q(z|θ) dz   (6)
256
+ where θ is the learnable posterior parameter, and z ∼ q(z|θ) ≡ q(z)2 is a vector with the same
257
+ dimension on the input layer, which plays the role of an auxiliary latent variable. Moreover, allowing local
258
+ reparametrizations, the variational posterior for fully connected layers become a modification of Eq. 3
259
+ written as
260
+ w ∼ q(w|z) = ∏_{ij} N(wij ; zi µij , σ²ij) .   (7)
266
+ Notice that by enhancing the complexity of q(z), we can increase the flexibility of the variational posterior.
267
+ This can be done using Normalizing Flows since the dimensionality of z is much lower compared to the
268
+ weights. Starting from samples z0 ∼ q(z0) from fully factorized Gaussian Eq. 3, a rich distribution q(zK)
269
+ can be obtained by applying a successively invertible K-transformations fK on z0
270
+ zK = NF(z0) = fK ◦ · · · ◦ f1(z0) ;   log q(zK) = log q(z0) − ∑_{k=1}^{K} log | det ∂fk/∂zk−1 | .   (8)
280
+ Unfortunately, the KL divergence in Eq. 2 becomes generally intractable as the posterior q(w) is an
281
+ infinite mixture as shown in Eq. 6. This is addressed also in (33) by evoking Bayes law q(zK)q(w|zK) =
282
+ q(w)q(zK|w) and introducing an auxiliary distribution r(zK|w, φ) parameterized by φ, with the purpose
283
+ of approximating the posterior distribution of the original variational parameters q(zK|w) to further lower
284
+ bound the KL divergence term. Therefore, KL divergence term can be bounded as follows
285
+ − KL[ q(w) ∥ p(w) ] = −Eq(w)[ log( q(w)/p(w) ) ]
+ ≥ −Eq(w)[ log( q(w)/p(w) ) + KL[ q(zK|w) ∥ r(zK|w, φ) ] ]
+ = −Eq(w)[ log( q(w)/p(w) ) + Eq(zK|w)[ log( q(zK|w)/r(zK|w, φ) ) ] ]
+ = −Eq(w)[ Eq(zK|w)[ log( q(w)/p(w) ) ] + Eq(zK|w)[ log( q(zK|w)/r(zK|w, φ) ) ] ]
+ = −Eq(w,zK)[ log( q(w)/p(w) ) + log( q(zK|w)/r(zK|w, φ) ) ]
+ = Eq(w,zK) [− log (q(w)q(zK|w)) + log r(zK|w, φ) + log p(w)] ⇒
+ − KL[ q(w) ∥ p(w) ] ≥ Eq(w,zK)[ −KL[ q(w|zK) ∥ p(w) ] − log q(zK) + log r(zK|w, φ) ] ,   (9)
360
+ where we have taken into account that KL[P∥Q] ≥ 0, and the equality is satisfied iff P = Q. In the last
361
+ line, the first term can be analytically computed since it will be the KL divergence between two Gaussian
362
+ distributions, while the second term is given by the Normalizing flow generated by fK as we observe in
363
+ 2 The parameter θ will be omitted in this section for clarity of notation.
364
368
+ Eq. 8. Finally, the auxiliary posterior term is parameterized by inverse normalizing flows as follows (34)
369
+ z0 = NF−1(zK) = g−1_1 ◦ · · · ◦ g−1_K (zK) ;   log r(zK|w, φ) = log r(z0|w, φ) + ∑_{k=1}^{K} log | det ∂g−1_k/∂zk | ,   (10)
382
+ where one can parameterize g−1
383
+ K as another normalizing flow. In the paper (32), the authors also propose a
384
+ flexible parametrization of the auxiliary posterior as
385
+ z0 ∼ r(zK|w, φ) = ∏_i N(z0; µ̃i(w, φ), σ̃²i(w, φ)) .   (11)
391
+ We will use the parameterization of the mean ˜µ, and the variance ˜σ2 as in the original paper as well as the
392
+ masked RealNVP (35) as choice of Normalizing flows.
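+ For reference, a single masked affine-coupling step of the RealNVP type reads as follows (an illustrative sketch; the shift-and-scale network and the masking scheme are assumptions, not the DenseMNF implementation used here). Its log-determinant term is the quantity summed in Eq. 8.
+ import tensorflow as tf
+ 
+ def coupling_step(z, shift_and_scale, mask):
+     """One masked affine coupling for the auxiliary variable z; mask selects the components kept fixed."""
+     z_fixed = z * mask
+     shift, log_scale = shift_and_scale(z_fixed)                   # any small network with matching output shapes
+     z_new = z_fixed + (1.0 - mask) * (z * tf.exp(log_scale) + shift)
+     log_det = tf.reduce_sum((1.0 - mask) * log_scale, axis=-1)    # contribution to log q(z_K) in Eq. 8
+     return z_new, log_det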
393
+ 3
394
+ N-BODY SIMULATIONS DATASET
395
+ In this work, we leverage 2000 hypercubes simulation taken from The Quijote project (24). They
396
+ have been run using the TreePM code Gadget-III (36), and their initial conditions were generated at
397
+ z = 127 using 2LPT (37). The set chosen for this work is made of standard simulations with different
398
+ random seeds with the intention of emulating the cosmic variance. Each instance corresponds to a three-
399
+ dimensional distribution of the density field with size 64³. The cosmological parameters vary according
400
+ to Ωm ∈ [0.1, 0.5], Ωb ∈ [0.03, 0.07], h ∈ [0.5, 0.9], ns ∈ [0.8, 1.2], σ8 ∈ [0.6, 1.0], while neutrino mass
401
+ (Mν = 0eV) and the equation of state parameter (w = −1) are kept fixed. The dataset was split into
402
+ training(70%), validation (10%), and test (20%), while hypercubes were logarithmic transformed and the
403
+ cosmological parameters normalized between 0 and 1. In this paper we will build BNNs with the ability to
404
+ predict three out of five aforementioned parameters, Ωm, σ8 and h.
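+ An illustrative preprocessing sketch following the description above; the exact logarithmic transform and array layout used by the authors are not specified here, so these choices are assumptions.
+ import numpy as np
+ 
+ def preprocess(cubes, params, param_ranges):
+     """Log-transform the 64^3 density cubes and rescale (Omega_m, sigma_8, h) to [0, 1]."""
+     x = np.log10(1.0 + cubes)                    # one possible logarithmic transform
+     lo, hi = param_ranges[:, 0], param_ranges[:, 1]
+     y = (params - lo) / (hi - lo)                # normalise the targets to [0, 1]
+     return x.astype(np.float32), y.astype(np.float32)
+ 
+ ranges = np.array([[0.1, 0.5], [0.6, 1.0], [0.5, 0.9]])   # Omega_m, sigma_8, h ranges quoted above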
405
+ 4
406
+ BNNS IMPLEMENTATION
407
+ We will consider three different BNNs architectures based on the discussion presented in Section 2: standard
408
+ BNNs (prior and variational posterior defined as a mean-field Normal distributions) [sBNNs]; BNNs with
409
+ Flipout estimator [FlipoutBNNs]; and Multiplicative normalizing flows [VBNNs]. The experiments were
410
+ implemented using the TensorFlow v:2.9 and TensorFlow-probability v:0.19 (38). All BNNs designed in this
411
+ paper are comprised of three parts. First, all experiments start with a 64³-voxel input layer corresponding to
412
+ the normalised 3D density field followed by the fully-convolutional ResNet-18 backbone as it is presented
413
+ schematically in table 1. All the Resblock are fully pre-activated and their representation can be seen
414
+ in figure. 1. The repository Classification models 3D was used to build the backbone of BNNs (39).
415
+ Subsequently, the second part of BNNs represents the stochasticity of the network. This is comprised
416
+ of just one layer and it depends on the type of BNN used. For sBNNs, we employ the dense variational
417
+ layer which uses variational inference to fit an approximate posterior to the distribution over both the
418
+ kernel matrix and the bias terms. Here, we use as posterior and prior(no-trainable) Normal distributions.
419
+ Experiments with FlipoutBNNs for instance, are made via Flipout dense layer where the mean field normal
420
+ distribution are also utilized to parameterize the distributions. These two layers are already implemented in
421
+ the package TF-probability (38). On the other hand, for VBNNs we have adapted the class DenseMNF
422
+ implemented in the repositories TF-MNF, MNF-VBNN (32) to our model. Here, we use 50 layers for the
423
+ masked RealNVP NF, and the maximum variance for layer weights is around the unity. Finally, the last
424
428
+ ResNet-18 backbone
429
+ Layer Name
430
+ Input Shape
431
+ Output Shape
432
+ Batch Norm
433
+ (Nbatch, 64,64,64,3)
434
+ (Nbatch, 64,64,64,3)
435
+ 3D Convolutional
436
+ (Nbatch, 70,70,70,3)
437
+ (Nbatch, 32,32,32,64)
438
+ Batch Norm+ReLU
439
+ (Nbatch, 32,32,32,64)
440
+ (Nbatch, 32,32,32,64)
441
+ Max Pooling 3D
442
+ (Nbatch, 34,34,34,64)
443
+ (Nbatch, 16,16,16,64)
444
+ Batch Norm+ReLU
445
+ (Nbatch, 16,16,16,64)
446
+ (Nbatch, 16,16,16,64)
447
+ Resblock 1
448
+
449
+ (Nbatch, 16, 16, 16, 64)
450
+ (Nbatch, 16, 16, 16, 64)
451
+
452
+ (Nbatch, 16,16,16,64)
453
+ Batch Norm+ReLU
454
+ (Nbatch, 16,16,16,64)
455
+ (Nbatch, 16,16,16,64)
456
+ Resblock 2
457
+
458
+ (Nbatch, 16, 16, 16, 64)
459
+ (Nbatch, 8, 8, 8, 128)
460
+
461
+ (Nbatch, 8,8,8,128)
462
+ Batch Norm+ReLU
463
+ (Nbatch, 8,8,8,128 )
464
+ (Nbatch, 8,8,8,128)
465
+ Resblock 3
466
+
467
+ (Nbatch, 8, 8, 8, 128)
468
+ (Nbatch, 4, 4, 4, 256)
469
+
470
+ (Nbatch, 4,4,4,256)
471
+ Batch Norm+ReLU
472
+ (Nbatch, 4,4,4,256 )
473
+ (Nbatch, 4,4,4,256)
474
+ Resblock 4
475
+
476
+ (Nbatch, 4, 4, 4, 256)
477
+ (Nbatch, 2, 2, 2, 512)
478
+
479
+ (Nbatch, 2,2,2,512)
480
+ Batch Norm+ReLU
481
+ (Nbatch, 2,2,2,512 )
482
+ (Nbatch, 2,2,2,512)
483
+ Global Avg Pooling
484
+ (Nbatch, 2,2,2,512)
485
+ (Nbatch, 512)
486
+ Table 1. Configuration of the backbone BNNs used for all experiments presented in this paper.
487
+ part of all BNNs account for the output of the network, which is dependent on the aleatoric uncertainty
488
+ parameterization. We use a 3D multivariate Gaussian distribution with nine parameters to be learnt (three
489
+ means µ for the cosmological parameters, and six elements for the covariance matrix Σ).
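+ A possible TensorFlow Probability sketch of such an output layer is given below; it illustrates the parameterization just described (three means plus six covariance-related quantities), and is not the authors' exact implementation.
+ import tensorflow as tf
+ import tensorflow_probability as tfp
+ tfd = tfp.distributions
+ 
+ head = tf.keras.layers.Dense(3 + 6)   # 3 means + 6 lower-triangular scale entries
+ 
+ def output_distribution(features):
+     """Map backbone features to a 3D Gaussian over (Omega_m, sigma_8, h); a sketch only."""
+     p = head(features)
+     loc = p[..., :3]
+     tril = tfp.math.fill_triangular(p[..., 3:])                    # 6 values -> 3x3 lower triangle
+     tril = tf.linalg.set_diag(tril, tf.nn.softplus(tf.linalg.diag_part(tril)) + 1e-5)  # positive diagonal
+     return tfd.MultivariateNormalTriL(loc=loc, scale_tril=tril)    # its NLL matches Eq. 12 up to constants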
490
+ The loss function to be optimized during training is given by the ELBO 2 where the second term is
491
+ associated to the negative log-likelihood (NLL)
492
+ − NLL ∼ 1
493
+ 2 log |s · Σ| + 1
494
+ 2(y − µ)⊤ (s · Σ)−1 (y − µ),
495
+ (12)
496
+ averaged over the mini-batch. The scalar variable s is equal to one during the training process, and it
497
+ becomes a trainable variable during post-training to recalibrate the probability density function (16, 31).
498
+ The algorithm used to minimize the objective function is the Adam optimizer with first and second moment
499
+ exponential decay rates of 0.9 and 0.999, respectively (40). The learning rate starts from 10−3 and it
500
+ will be reduced by a factor of 0.8 in case that any improvement has not been observed after 10 epochs.
501
+ Furthermore, we have applied warm-up period for which the model turns on progressively the KL term
502
+ in Eq. 2. This is achieved by introducing a β variable in the ELBO, i.e., β · KL
503
+
504
+ q(w|θ)
505
+ ��p(w)
506
+
507
+ , so, this
508
+ parameter starts being equal to 0 and grows linearly to 1 during 10 epochs (41). BNNs were trained with
509
+ 32 batches and early stopping callback for avoiding over-fitting. The infrastructure used was the Google
510
+ Cloud Platform (GCP) using a nvidia-tesla-t4 of 16 GB GDDR6 in a N1 machine series shared-core.
511
+ 7
512
+
513
+ Hort´ua et al.
514
+ Parameter estimation via BNNs
515
+ Figure 1a. Illustration of the first skip connection in
516
+ a residual block.
517
+ Figure 1b. Illustration of the second skip connection
518
+ in the residual block.
519
+ Figure 1. Each Resblock includes both skip connection configurations. (A) The Resblock starts with this
520
+ configuration applied to the input tensor. (B) The output of the previous configuration is fed into this
521
+ connection.
522
+ 4.1
523
+ Metrics
524
+ We compare all BNN results in terms of performance, i.e., the precision of their predictions for the
525
+ cosmological parameters quantified through Mean Square Error (MSE), ELBO, and plotting the true vs
526
+ predicted values with its coefficient of determination. Also, it is important to quantify the quality of the
527
+ uncertainty estimates. One of the ways to diagnostic the quality of the uncertainty estimates is through
528
+ reliability diagrams. Following (31, 11), we can define perfect calibration of regression uncertainty as
529
+ Eˆσ2
530
+ ����
531
+ E[(y − µ)2]
532
+ �� ˆσ2 = α2�
533
+ − α2���
534
+
535
+
536
+ α2 ∈ R
537
+ �� α2 ≥ 0
538
+
539
+ .
540
+ (13)
541
+ Hence, the predicted uncertainty ˆσ2 is partitioned into K bins with equal width, and the variance per bin is
542
+ defined as
543
+ var(Bk) :=
544
+ 1
545
+ ��Bk
546
+ ��
547
+
548
+ i∈Bm
549
+ 1
550
+ N
551
+ N
552
+
553
+ n=1
554
+ (µi,n − yi)2,
555
+ (14)
556
+ with N stochastic forward passes. On the other hand, the uncertainty per bin is defined as
557
+ uncert(Bk) :=
558
+ 1
559
+ |Bk|
560
+
561
+ i∈Bk
562
+ ˆσ2
563
+ i .
564
+ (15)
565
+ With these two quantities, we can generate reliability diagrams to assess the quality of the estimated
566
+ uncertainty via plotting var(Bk) vs. uncert(Bk). In addition, we can compute the expected uncertainty
567
+ 8
568
+
569
+ skip connection
570
+ (identity)
571
+ F() +
572
+ Conv3D
573
+ BN+ReLU
574
+ Conv3D
575
+ + (。)
576
+ add
577
+ output
578
+ F()skip connection
579
+ (identity)
580
+ F() +
581
+ BN+ReLU
582
+ Conv3D
583
+ BN+ReLU
584
+ Conv3D
585
+ + (,)
586
+ ppe
587
+ output
588
+ F()Hort´ua et al.
589
+ Parameter estimation via BNNs
590
+ Metrics
591
+ FlipoutBNNs
592
+ VBNNs
593
+ sBNNs
594
+ Ωm
595
+ σ8
596
+ h
597
+ Ωmh2
598
+ σ8Ω0.25
599
+ m
600
+ Ωm
601
+ σ8
602
+ h
603
+ Ωmh2
604
+ σ8Ω0.25
605
+ m
606
+ Ωm
607
+ σ8
608
+ h
609
+ Ωmh2
610
+ σ8Ω0.25
611
+ m
612
+ MSE
613
+ 0.063
614
+ 0.057
615
+ 0.190
616
+ ELBO
617
+ 20.85
618
+ 19.71
619
+ 31.57
620
+ r2
621
+ 0.82
622
+ 0.98
623
+ 0.2
624
+ 0.03
625
+ 0.93
626
+ 0.85
627
+ 0.99
628
+ 0.4
629
+ 0.56
630
+ 0.95
631
+ 0.75
632
+ 0.85
633
+ 0.01
634
+ 0.23
635
+ 0.80
636
+ UCE
637
+ 0.109
638
+ 8.10
639
+ 0.26
640
+ 0.0008
641
+ 0.0008
642
+ 0.010
643
+ >1.0
644
+ Table 2. Metrics test set results for all BNNs architectures. High UCE values indicate miscalibration. MSE
645
+ and ELBO are computed only over the cosmological parameters.
646
+ calibration error (UCE) in order to quantify the miscalibration
647
+ UCE :=
648
+ K
649
+
650
+ k=1
651
+ |Bk|
652
+ m
653
+ ��var(Bk) − uncert(Bk)
654
+ ��,
655
+ (16)
656
+ with number of inputs m and set of indices Bk of inputs, for which the uncertainty falls into the bin k. A
657
+ more general approach proposed in (16) consists in computing the expected coverage probabilities defined
658
+ as the x% of samples for which the true value of the parameters falls in the x%-confidence region defined
659
+ by the joint posterior. Clearly, this option is more precise since it captures higher-order statistics through
660
+ the full posterior distribution. However, for simplicity, we will follow the UCE approach.
661
+ 5
662
+ ANALYSIS AND RESULTS OF PARAMETER INFERENCE WITH BNNS
663
+ In this section we discuss the results obtained by comparing three different versions of BNNs, the one
664
+ with MNFs, the standard BNN, and the third one using Flipout as estimator. The results reported in this
665
+ section were computed on the Test dataset. Table 2. shows the metrics obtained for each BNN approach.
666
+ As mentioned, MSE, ELBO and r2 provide well estimates for determining the precision of the model,
667
+ while UCE measures the miscalibration. Here, we can observe that VBNNs outperform all experiments,
668
+ not only taking into account the average error, but also the precision for each cosmological parameter along
669
+ with a good calibration in its uncertainty predictions. Followed by VBBNs, we have the FlipoutBNNs,
670
+ however, although this approach yields good cosmological parameter estimation, it understimates their
671
+ uncertainties. Therefore, VBNNs avoids indeed the application of an extra post training step in the Machine
672
+ Learning pipeline related to calibration. Notice that in all experiments, h becomes hardly predicted for all
673
+ model. Figure 2 displays the predicted against true values for Ωm, ωm (instead of h), σ8 and the degeneracy
674
+ direction defined as σ8Ω0.25
675
+ m . Error bars report the epistemic plus aleatoric uncertainties predicted by BNNs,
676
+ which illustrates the advantages of these probabilistic models where the certainty prediction of the model is
677
+ captured instead of traditional DNNs where only point estimates are present. This uncertainty was taken
678
+ from the diagonal part of the covariance matrix.
679
+ 5.1
680
+ Calibration metrics
681
+ In figure 3, we analyze the quality of our uncertainty measurement using calibration diagrams. We show
682
+ the predicted uncertainty vs observed uncertainty from our model on the Test dataset. Better performing
683
+ uncertainty estimates should correlate more accurately with the dashed lines. We can see that estimating
684
+ uncertainty from VBNNs reflect better the real uncertainty. Furthermore, the scale for VBNNs is two
685
+ orders of magnitude lower than FlipoutBNN, which also implies how reliable is this models according
686
+ to their predictions. Notice that the even if we partitioned the variance into K = 10 bins with equal
687
+ width, FlipoutBNNs and sBNNs yield underestimate uncertainties (many examples concentrates in lower
688
+ bin values), for this reason we see that while VBNNs supply all ten samples in the calibration plots, for
689
+ 9
690
+
691
+ Hort´ua et al.
692
+ Parameter estimation via BNNs
693
+ Figure 2. Plots of True vs Predicted values provided by the best experiment VBNNs, for Ωm, σ8, and
694
+ some derivative parameters. Points are the mean of the predicted distributions, and error bars stand for the
695
+ heteroscedastic uncertainty associated to epistemic plus aleatoric uncertainty at 1σ.
696
+ the others we have just 3-4 of them. Next, we employed the σ-scaling methodology for calibrating the
697
+ FlipoutBNNs predictions (31). For doing so, we optimize uniquely the loss function described in Eq. 12
698
+ where all parameters related to the BNNs where frozen, i.e., the only trainable parameter was s. After
699
+ training, we got s ∼ 0.723, reducing UCE only up to 10%, and the number of samples in the calibration
700
+ diagrams enlarged to 4-5. This minor performance enhancement means that σ-scaling is not suitable to
701
+ calibrate all BNNs, and alternative re-calibration techniques must be taken into account in order to build
702
+ reliable intervals. At this point, we have noticed the advantages of working with methods that leading with
703
+ networks already well-calibrated after the training step (17).
704
+ 5.2
705
+ Joint analysis for Cosmological parameters
706
+ In order to show the parameter intervals and contours from the N-body simulations, we choose randomly
707
+ an example from the test set with true values shown in table 3. The two-dimensional posterior distribution
708
+ of the cosmological parameters are shown in figure 4 and the parameter 95% intervals are reported in
709
+ table 3. We can observe that VBNNs provides considerably tighter and well constraints on all parameters
710
+ 10
711
+
712
+ 0.6
713
+ perfectmatch
714
+ 1.1
715
+ perfectmatch
716
+ 0.5
717
+ 1.0
718
+ 8
719
+ 0.9
720
+ Predicted
721
+ Predicted
722
+ 0.3
723
+ 0.8
724
+ 0.2
725
+ 0.7
726
+ 0.1
727
+ 0.6
728
+ 0.2
729
+ 0.3
730
+ 0.4
731
+ 0.7
732
+ 0.8
733
+ 0.9
734
+ True08
735
+ perfect match
736
+ perfect match
737
+ 0.40
738
+ 0.8
739
+ 0.35
740
+ 0.7
741
+ ywu
742
+ 0.30
743
+ 0.25
744
+ Predicted
745
+ 0.6
746
+ 0.20
747
+ 0.5
748
+ 0.15
749
+ 0.10
750
+ 0.4
751
+ 0.05
752
+ 0.4
753
+ 0.5
754
+ 0.6
755
+ 0.7
756
+ 0.8
757
+ 0.1
758
+ 0.2
759
+ 0.3
760
+ True Ωmh?Hort´ua et al.
761
+ Parameter estimation via BNNs
762
+ Figure 3. Calibration diagrams for the best experiments, VBNNs and FlipoutBNNs. The lower is the
763
+ UCE value, the higher is the calibration of the model. Dashes lines stand for the perfect calibration, so, the
764
+ discrepancy to this identity curve reveals miscalibration.
765
+ with respect to the sBNNs (18). Most important, this technique offers also the correlation among parameters
766
+ and the measurement about how reliable the model in their predictions.
767
+ 6
768
+ CONCLUSIONS
769
+ N-body simulations offer one of the most powerful ways to understand the initial conditions of the
770
+ Universe and improve our knowledge on fundamental physics. In this paper we used QUIJOTE dataset, in
771
+ order to show how convolutional DNNs capture non-Gaussian patters without requiring a specifying the
772
+ summary statistic (such as PS). Additionally, we have show how we can build probabilistic DNNs to obtain
773
+ uncertainties which account for the reliability in their predictions. One of the main goals of this paper was
774
+ 11
775
+
776
+ CalibrationforQm withVBNN
777
+ Calibrationforo:withVBNN
778
+ 1e-3
779
+ 1e-3
780
+ UCE=0.0008
781
+ UCE=0.0008
782
+ 4
783
+ 2
784
+ m
785
+ 2
786
+ 1
787
+ 0
788
+ 0.5
789
+ 1.0
790
+ 1.5
791
+ 2.0
792
+ 2.5
793
+ 3.0
794
+ 3.5
795
+ 0.2
796
+ 0.4
797
+ 0.6
798
+ 0.8
799
+ 1.0
800
+ 1.2
801
+ Expecteduncertainty
802
+ 1e-3
803
+ Expecteduncertainty
804
+ 1e-3
805
+ le-2 Calibration for h with VBNN
806
+ Calibration for Qm with FlipoutBNNs
807
+ 0.3
808
+ rtainty
809
+ 2.5
810
+ UCE=0.0105
811
+ Observed uncertainty
812
+ UCE=0.1095
813
+ 2.0
814
+ uncer
815
+ 0.2
816
+ 1.5
817
+ Observed
818
+ 0.1
819
+ 1.0
820
+ 0.5
821
+ 0.0
822
+ 0.6
823
+ 0.7
824
+ 0.8
825
+ 0.9
826
+ 1.0
827
+ 1.1
828
+ 0.00
829
+ 0.05
830
+ 0.10
831
+ 0.15
832
+ 0.20
833
+ 0.25
834
+ 0.30
835
+ Expected uncertainty
836
+ 1e-2
837
+ Expecteduncertainty
838
+ CalibrationforO: withFlipoutBNNs
839
+ CalibrationforhwithFlipoutBNNs
840
+ Observed uncertainty
841
+ 20
842
+ UCE=8.099
843
+ UCE=0.2595
844
+ 15
845
+ 4
846
+ 3
847
+ 10
848
+ 2
849
+ 5
850
+ 1
851
+ 0
852
+ 5
853
+ 10
854
+ 15
855
+ 20
856
+ 0
857
+ 1
858
+ 2
859
+ 3
860
+ 4
861
+ 5
862
+ Expected uncertainty
863
+ ExpecteduncertaintyHort´ua et al.
864
+ Parameter estimation via BNNs
865
+ Figure 4. 68% and 95% parameter constraint contours from one example of Quijote test dataset using
866
+ VBNNs and FlipoutBNNs. The diagonal plots are the marginalized parameter constraints, the dashed lines
867
+ stand for the the true values. This plot was made using Getdist (42).
868
+ also reporting how improves these BNNs when we integrate them with techniques such as a Multiplicative
869
+ normalizing flows to enhance the variational posterior complexity. We found that VBNNs not only provides
870
+ considerably tighter and well constraints on all cosmological parameters as we observed in figure 4, but
871
+ also yields with well-calibrated estimate uncertainties as it was shown in figure 3. Nevertheless, some
872
+ limitations in this research includes simple prior assumptions (mean-field approximations), lower resolution
873
+ in the simulations, and absence of additional calibration techniques. These restrictions will be analysed in
874
+ detail in a future paper.
875
+ 12
876
+
877
+ 0.6
878
+ D
879
+ 0.5
880
+ m
881
+ 0.4
882
+ 0.3
883
+ 0.8
884
+ 0o 0.7
885
+ b
886
+ 0.6
887
+ 1.0
888
+ 60.8
889
+ 0.6
890
+ 0.65
891
+ .25
892
+ 0.60
893
+ 0.55
894
+ 0.50
895
+ 0.6
896
+ 2
897
+ 0.4
898
+ 0.2
899
+ 0.3
900
+ 0.4
901
+ 0.5
902
+ 0.6
903
+ 0.6
904
+ 0.7
905
+ 0.8
906
+ 0.6
907
+ 0.8
908
+ 1.0
909
+ 0.50 0.55 0.60 0.65
910
+ 0.2
911
+ 0.4
912
+ 0.6
913
+ Qm
914
+ h
915
+ 0:Q0.25
916
+ Qmh2
917
+ m
918
+ VBNNs
919
+ FlipoutBNNsHort´ua et al.
920
+ Parameter estimation via BNNs
921
+ Parameter
922
+ 95% limits VBNNs
923
+ 95% limits FlipoutBNNs
924
+ True Value
925
+ Ωm
926
+ 0.47+0.10
927
+ −0.10
928
+ 0.45+0.11
929
+ −0.11
930
+ 0.495
931
+ σ8
932
+ 0.697+0.038
933
+ −0.038
934
+ 0.699+0.059
935
+ −0.060
936
+ 0.699
937
+ h
938
+ 0.81+0.17
939
+ −0.17
940
+ 0.78+0.20
941
+ −0.19
942
+ 0.800
943
+ σ8Ω0.25
944
+ m
945
+ 0.577+0.051
946
+ −0.052
947
+ 0.573+0.063
948
+ −0.064
949
+ 0.587
950
+ Ωmh2
951
+ 0.31+0.19
952
+ −0.18
953
+ 0.573+0.063
954
+ −0.064
955
+ 0.317
956
+ Table 3. Parameter 95% intervals taken from the parameter constraint contours (figure 4) from one example
957
+ of Quijote test dataset using VBNN and FlipoutBNN.
958
+ ACKNOWLEDGMENTS
959
+ This paper is based upon work supported by the Google Cloud Research Credits program with the award
960
+ GCP19980904.
961
+ Leonardo Casta˜neda was supported by patrimonio aut´onomo fondo Nacional de financiamiento para
962
+ la ciencia y la tecnolog´ıa y la innovacion Francisco Jos´e de Caldas (Minciencias Colombia) grant No
963
+ 110685269447 RC-80740-465-2020 projects 69723. H. J. Hort´ua acknowledges the support from cr´editos
964
+ educaci´on de doctorados nacionales y en el exterior- colciencias, and the grant provided by the Google
965
+ Cloud Research Credits program.
966
+ REFERENCES
+ 1. Borgani S, Kravtsov A. Cosmological simulations of galaxy clusters. Advanced Science Letters 4 (2011) 204–227. doi:10.1166/asl.2011.1209.
+ 2. Dodelson S. Modern Cosmology (Academic Press, Elsevier Science) (2003).
+ 3. Planck Collaboration, Aghanim N, Akrami Y, Ashdown M, Aumont J, Baccigalupi C, et al. Planck 2018 results. Astronomy and Astrophysics 641 (2020) A6. doi:10.1051/0004-6361/201833910.
+ 4. Tinker JL, Sheldon ES, Wechsler RH, Becker MR, Rozo E, Zu Y, et al. Cosmological constraints from galaxy clustering and the mass-to-number ratio of galaxy clusters. The Astrophysical Journal 745 (2011) 16. doi:10.1088/0004-637x/745/1/16.
+ 5. Yusofi E, Ramzanpour MA. Cosmological constant problem and H0 tension in void-dominated cosmology (2022). doi:10.48550/ARXIV.2204.12180.
+ 6. Mesinger A, Furlanetto S, Cen R. 21cmFAST: a fast, seminumerical simulation of the high-redshift 21-cm signal. Monthly Notices of the Royal Astronomical Society 411 (2011) 955–972. doi:10.1111/j.1365-2966.2010.17731.x.
+ 7. Hamann J, Hannestad S, Lesgourgues J, Rampf C, Wong YY. Cosmological parameters from large scale structure - geometric versus shape information. Journal of Cosmology and Astroparticle Physics 2010 (2010) 022. doi:10.1088/1475-7516/2010/07/022.
+ 8. Abdalla E, Abellán GF, Aboubrahim A, Agnello A, Akarsu O, Akrami Y, et al. Cosmology intertwined: A review of the particle physics, astrophysics, and cosmology associated with the cosmological tensions and anomalies. Journal of High Energy Astrophysics (2022).
+ 9. Gillet N, Mesinger A, Greig B, Liu A, Ucci G. Deep learning from 21-cm tomography of the cosmic dawn and reionization. Monthly Notices of the Royal Astronomical Society 484 (2019) 282–293. doi:10.1093/mnras/stz010.
+ 10. Dvorkin C, Mishra-Sharma S, Nord B, Villar VA, Avestruz C, Bechtol K, et al. Machine learning and cosmology (2022). doi:10.48550/ARXIV.2203.08056.
+ 11. Guo C, Pleiss G, Sun Y, Weinberger KQ. On calibration of modern neural networks. Proceedings of the 34th International Conference on Machine Learning - Volume 70 (JMLR.org) (2017), ICML'17, 1321–1330.
+ 12. Chang DT. Bayesian neural networks: Essentials (2021). doi:10.48550/ARXIV.2106.13594.
+ 13. Ravanbakhsh S, Oliva J, Fromenteau S, Price LC, Ho S, Schneider J, et al. Estimating cosmological parameters from the dark matter distribution (2017). doi:10.48550/ARXIV.1711.02033.
+ 14. Lazanu A. Extracting cosmological parameters from N-body simulations using machine learning techniques. Journal of Cosmology and Astroparticle Physics 2021 (2021) 039. doi:10.1088/1475-7516/2021/09/039.
+ 15. Wang BY, Pisani A, Villaescusa-Navarro F, Wandelt BD. Machine learning cosmology from void properties (2022). doi:10.48550/ARXIV.2212.06860.
+ 16. Hortúa HJ, Volpi R, Marinelli D, Malagò L. Parameter estimation for the cosmic microwave background with Bayesian neural networks. Physical Review D 102 (2020). doi:10.1103/physrevd.102.103509.
+ 17. Hortúa HJ, Malagò L, Volpi R. Constraining the reionization history using Bayesian normalizing flows. Machine Learning: Science and Technology 1 (2020) 035014. doi:10.1088/2632-2153/aba6f1.
+ 18. Hortúa HJ. Constraining cosmological parameters from N-body simulations with Bayesian neural networks (2021). doi:10.48550/ARXIV.2112.11865.
+ 19. Mancarella M, Kennedy J, Bose B, Lombriser L. Seeking new physics in cosmology with Bayesian neural networks: Dark energy and modified gravity. Phys. Rev. D 105 (2022) 023531. doi:10.1103/PhysRevD.105.023531.
+ 20. List F, Rodd NL, Lewis GF, Bhat I. Galactic center excess in a new light: Disentangling the gamma-ray sky with Bayesian graph convolutional neural networks. Physical Review Letters 125 (2020). doi:10.1103/physrevlett.125.241102.
+ 21. Wagner-Carena S, Park JW, Birrer S, Marshall PJ, Roodman A, Wechsler RH. Hierarchical inference with Bayesian neural networks: An application to strong gravitational lensing. The Astrophysical Journal 909 (2021) 187. doi:10.3847/1538-4357/abdf59.
+ 22. Graves A. Practical variational inference for neural networks, vol. 24 (Curran Associates, Inc.) (2011).
+ 23. Charnock T, Perreault-Levasseur L, Lanusse F. Bayesian neural networks (2020). doi:10.48550/ARXIV.2006.01490.
+ 24. Villaescusa-Navarro F, Hahn C, Massara E, Banerjee A, Delgado AM, Ramanah DK, et al. The Quijote simulations. The Astrophysical Journal Supplement Series 250 (2020) 2. doi:10.3847/1538-4365/ab9d82.
+ 25. Abdar M, Pourpanah F, Hussain S, Rezazadegan D, Liu L, Ghavamzadeh M, et al. A review of uncertainty quantification in deep learning: Techniques, applications and challenges. Information Fusion 76 (2021) 243–297. doi:10.1016/j.inffus.2021.05.008.
+ 26. Gal Y. Uncertainty in Deep Learning. Ph.D. thesis, University of Cambridge (2016).
+ 27. Wen Y, Vicol P, Ba J, Tran D, Grosse R. Flipout: Efficient pseudo-independent weight perturbations on mini-batches (2018). doi:10.48550/ARXIV.1803.04386.
+ 28. Kiureghian AD, Ditlevsen O. Aleatory or epistemic? Does it matter? Structural Safety 31 (2009) 105–112. doi:10.1016/j.strusafe.2008.06.020. Risk Acceptance and Risk Communication.
+ 29. Kendall A, Gal Y. What uncertainties do we need in Bayesian deep learning for computer vision? (2017).
+ 30. Kwon Y, Won JH, Joon Kim B, Paik M. In International Conference on Medical Imaging with Deep Learning (2018) 13.
+ 31. Laves MH, Ihler S, Fast JF, Kahrs LA, Ortmaier T. Well-calibrated regression uncertainty in medical imaging with deep learning. Medical Imaging with Deep Learning (2020).
+ 32. Louizos C, Welling M. Multiplicative normalizing flows for variational Bayesian neural networks. Proceedings of the 34th International Conference on Machine Learning - Volume 70 (JMLR.org) (2017), ICML'17, 2218–2227.
+ 33. Ranganath R, Tran D, Blei DM. Hierarchical variational models. Proceedings of the 33rd International Conference on Machine Learning - Volume 48 (JMLR.org) (2016), ICML'16, 2568–2577.
+ 34. Touati A, Satija H, Romoff J, Pineau J, Vincent P. Randomized value functions via multiplicative normalizing flows (2018). doi:10.48550/ARXIV.1806.02315.
+ 35. Dinh L, Sohl-Dickstein J, Bengio S. Density estimation using Real NVP. International Conference on Learning Representations (2017).
+ 36. Springel V. The cosmological simulation code GADGET-2. Monthly Notices of the Royal Astronomical Society 364 (2005) 1105–1134. doi:10.1111/j.1365-2966.2005.09655.x.
+ 37. Scoccimarro R. Transients from initial conditions: a perturbative analysis. Monthly Notices of the Royal Astronomical Society 299 (1998) 1097–1118. doi:10.1046/j.1365-8711.1998.01845.x.
+ 38. Abadi M, Agarwal A, Barham P, Brevdo E, Chen Z, Citro C, et al. TensorFlow: Large-scale machine learning on heterogeneous systems (2015). Software available from tensorflow.org.
+ 39. Solovyev R, Kalinin AA, Gabruseva T. 3D convolutional neural networks for stalled brain capillary detection. Computers in Biology and Medicine 141 (2022) 105089. doi:10.1016/j.compbiomed.2021.105089.
+ 40. Kingma DP, Ba J. Adam: A method for stochastic optimization (2014). doi:10.48550/ARXIV.1412.6980.
+ 41. Sønderby CK, Raiko T, Maaløe L, Sønderby SK, Winther O. Ladder variational autoencoders. Proceedings of the 30th International Conference on Neural Information Processing Systems (Red Hook, NY, USA: Curran Associates Inc.) (2016), NIPS'16, 3745–3753.
+ 42. Lewis A. GetDist: a Python package for analysing Monte Carlo samples (2019).
9NE2T4oBgHgl3EQflwcz/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
AdE1T4oBgHgl3EQf9Aai/content/tmp_files/2301.03552v1.pdf.txt ADDED
@@ -0,0 +1,1395 @@
+ Lieb lattices and pseudospin-1 dynamics under barrier- and
+ well-like electrostatic interactions
+ V. Jakubský1 and K. Zelaya1
+ 1Nuclear Physics Institute, Czech Academy of Science, 250 68 Řež, Czech Republic
+ Abstract
+ This work considers the confinement and scattering of electrons in a Lieb lattice
+ subjected to the influence of a rectangular electrostatic barrier. In this setup, the hopping
+ amplitudes between nearest neighbors in orthogonal directions are allowed to differ, and
+ the next-nearest neighbor interaction describes spin-orbit coupling. This makes it possible to
+ confine electrons and generate bound states, whose number is determined exactly
+ for vanishing momentum parallel to the barrier. In that case, it is proved that one even and one
+ odd bound state are always generated, and the number of bound states increases for non-zero
+ and increasing values of the parallel momentum; that is, the bound states carry current. In the
+ scattering regime, the exact values of the energy at which resonant tunneling
+ occurs are determined. Perfect tunneling in the form of super-Klein tunneling is
+ proved to exist regardless of the band gap opening. Finally, it is shown that perfect reflection
+ appears when solutions are coupled to the intermediate flat-band solution.
+ 1 Introduction
+ The theoretical and experimental progress in the physics of graphene and other Dirac materials
22
+ has become a trending topic in material science and theoretical physics [1,2]. Many remarkable
23
+ properties of these materials follow from the fact that dynamics of low-energy quasi-particles is
24
+ described by equations known in relativistic quantum mechanics. It makes it possible to test
25
+ relativistic properties such as Klein tunneling [3,4], relativistic Landau levels, and the existence
26
+ of pseudoparticles violating the Lorentz invariance [5, 6] (type-II Dirac fermions). Graphene
27
+ mono- and multi-layer systems exhibit transport properties such as quantum Hall effect [7]
28
+ and anomalous quantum Hall effect in graphene [8], and Josephson effect in twisted cuprate
29
+ bilayers [9].
30
+ Graphene has been shown to be a helpful benchmark system for testing the properties of relativistic
31
+ pseudospin-1/2 particles in low-energy systems.
32
+ Nevertheless, the family of Dirac materials
33
+ contains also other, equally interesting, members.
34
+ Their geometries can extend beyond the
35
+ honeycomb lattice.
36
+ For instance, there are Kagome [10], Dice or α − T3 [11, 12], and Lieb
37
+ lattices [13, 14], which lead to effective pseudospin-1 Dirac equations. It was recently shown
38
+ that the Kagome lattice can be obtained from a geometrical deformation of the Lieb lattice [15].
39
+ For a recent survey of two-dimensional lattices and their physical properties and realization,
40
+ see [16].
41
+ arXiv:2301.03552v1 [cond-mat.mes-hall] 9 Jan 2023
+ Particularly, the Lieb lattice is a two-dimensional array with a periodicity of a square lattice.
45
+ The sites are located in the corners of each square and at the midpoints on its sides. To our
46
+ best knowledge, the Lieb lattice has not been found in nature. However, it has been prepared
47
+ artificially in diverse ways [17, 18]. It was realized in experiments with optical fibers [19–23].
48
+ Furthermore, it was formed by ultracold atoms trapped in optical lattices [24] or by electrons
49
+ of Cu(111) atoms confined by an array of CO molecules [25]. It was also prepared in covalent-
50
+ organic frameworks [26].
51
+ The tight-binding model can well describe the band structure of the Lieb lattice. It reveals
52
+ the existence of two bands with positive and negative energies and an additional so-called flat
53
+ band. The latter is associated with the states that have fixed (zero) energy independent of the
54
+ value of momentum. It is worth mentioning that the flat band solutions were prepared in the
55
+ optical experiments, see [22, 23]. Similarly to graphene, the dynamics of the low-energy quasi-
56
+ particles in the Lieb lattice is dictated by a relativistic Dirac-type equation. Nevertheless, these
57
+ quasi-particles have pseudospin-1 due to three atoms per unit cell.
58
+ In the current article, we investigate the scattering and confinement of the relativistic quasi-
59
+ particles by a rectangular electric potential in the Lieb lattice with a gapped band structure.
60
+ Gap-opening can be induced by on-site energy that differs on three sublattices or by the phase
61
+ acquired by the electron when jumping between the neighboring sites [24], see also [27]. In
62
+ the article, we adopt the second approach where a purely imaginary next nearest-neighbor
63
+ interaction, attributed to spin-orbit coupling [13], is taken into account.
64
+ Effects such as electron confinement and transmission are obtained with the aid of the proper
65
+ boundary conditions, which enforce the continuity on two out of the three pseudospin-1 compo-
66
+ nents. The third component can be discontinuous, which leads to a spatial discontinuity in the
67
+ probability density. Nevertheless, it does not compromise the associated continuity equation.
68
+ Electron dynamics for electrostatic interactions in graphene have been discussed in the litera-
69
+ ture, such as the transmission properties in square barriers [28,29] and electron confinement with
70
+ cylindrical quantum dots [30]. We thus focus on the related properties of the quasi-particle dy-
71
+ namics in the Lieb lattice. We further analyze the influence of the flat-band solution in electron
72
+ dynamics. As shown in the manuscript, solutions in this regime are described by degenerate
73
+ Bloch-wave solutions whose linear combinations can compose wavepackets of arbitrary form.
74
+ These are shown to be current-free solutions regardless of the nature of the wavepacket. As a
75
+ result, one obtains perfectly reflected waves when they couple to flat-band solutions.
76
+ The manuscript is structured as follows. In Sec. 2 we briefly introduce and discuss the main
77
+ properties of the Lieb lattice with nearest next-nearest neighbor interactions, from which the
78
+ effective low-energy Dirac equation is obtained.
79
+ In Sec. 3, we present the general solutions
80
+ and the transfer matrix associated with the rectangular electrostatic interaction. The latter
81
+ is then exploited in Sec. 4 and Sec. 5 to discuss in full detail the localization of electrons and
82
+ scattering dynamics, respectively. Finally, discussions and perspectives are provided in Sec. 7,
83
+ and complementary details about the proof of the number of bound states are given in App. A.
84
+ 2
85
+
86
+ (a)
87
+ (b)
88
+ Figure 1: (a) Lieb lattice, composed of the atoms A (blue-filled circle), B (green square), and C
+ (red-filled square). The dashed arrows denote the direction of the positive phase of the hopping parameter
+ between next-nearest neighbors B − C. (b) Composition of a unit cell of the Lieb lattice. The
+ unit displacement vectors ⃗δ1 = ax̂ and ⃗δ2 = aŷ connect the atom A with B and A with C,
+ respectively. The corresponding nearest-neighbor hopping parameters are t1, t2, whereas the next-nearest
+ neighbor hopping parameter is +it3 or −it3, depending on whether the hopping occurs along or against
+ the direction denoted by the arrows.
95
+ 2 Lieb lattice and pseudospin-1 Dirac equation
97
+ Let us consider an electronic Lieb lattice 1 so that the separation between two nearest atoms is
98
+ a, the length of each side of the square is ℓ = 2a. There are three sites in the elementary cell, see
99
+ Fig. 1a. The primitive translation vectors are ⃗r1 = 2aˆx and ⃗r2 = 2aˆy. It is customary to denote
100
+ the atoms at the corners of the square as A, whereas the atoms at the sides of the square are B
101
+ (horizontal) and C (vertical). The lattice vectors ⃗δ1 = aˆx = ⃗r1/2 and ⃗δ2 = aˆy = ⃗r2/2 connect an
102
+ atom on the site A to those on the sites B and C, respectively (see Fig. 1b). The atoms A, B and
103
+ C form the three sublattices ⃗RA = n1⃗r1 + n2⃗r2, ⃗RB = ⃗RA + ⃗δ1, and ⃗RC = ⃗RA + ⃗δ2, respectively,
+ with n1, n2 ∈ Z. The reciprocal space is spanned by the reciprocal translation vectors
+ ⃗rk1 and ⃗rk2, with ⃗rp · ⃗rkq = 2πδp,q, p, q = 1, 2. This leads to ⃗rk1 = (π/a) x̂ and ⃗rk2 = (π/a) ŷ. The
+ first Brillouin zone, constructed from the Wigner-Seitz rule, restricts to the region
+ kx ∈ [−π/2a, π/2a] and ky ∈ [−π/2a, π/2a].
+ The band structure of the electrons on the Lieb lattice can be analyzed with the use of the
115
+ tight-binding model. We consider the nearest-neighbor (NN) interactions between the
116
+ sites A − B and A − C, represented by the hopping parameters t1 and t2, respectively. We take
117
+ into account also the next-nearest neighbor (NNN) transition B − C, which can be complex
118
+ valued, with the sign of phase dependent on the orientation of the hopping. This emerges due
119
+ to external time-dependent driven fields in photonic Lieb lattices [31], and magnon Lieb and
120
+ Kagome lattices [32].
121
+ In particular, we consider a purely imaginary NNN hopping parameter e±iπ/2t3, where the
122
+ 1The results here obtained apply to optical Lieb lattices as well.
123
+ (a) t3 = 0
129
+ (b) t3 ̸= 0
130
+ Figure 2: Dispersion bands w+(⃗k) (yellow-upper), w−(⃗k) (green-lower), and w0(⃗k) (blue-middle)
131
+ for the gapless (a) and gapped (b) configurations.
132
+ hopping phase is positive (+) if the hopping occurs counter-clockwise, and negative (−) otherwise.
+ Such a hopping dynamics is depicted in Fig. 1a. This type of hopping was introduced
+ by Haldane in [8] as a model for the quantum anomalous Hall effect in graphene without strong
+ external magnetic fields, which was later found experimentally in [33]. See also [34] for a recent
136
+ review.
137
+ The spectral analysis of the tight-binding Hamiltonian reveals that there are three bands in
+ its spectrum [13],
+ w0(⃗k) = 0,   w±(⃗k) = ±2 √[ t1² cos²(a kx) + t2² cos²(a ky) + 4 t3² sin²(a kx) sin²(a ky) ].   (1)
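+ As a quick numerical illustration (not taken from the paper), the following Python sketch checks that a
+ Bloch Hamiltonian of the Lieb lattice with a purely imaginary B−C coupling reproduces the three bands
+ of Eq. (1), including the flat band. The specific gauge of the B−C term and the parameter values are
+ assumptions chosen to be consistent with Eq. (1).

import numpy as np

def h_bloch(kx, ky, t1=1.0, t2=1.0, t3=0.2, a=1.0):
    fAB = 2 * t1 * np.cos(a * kx)                    # A-B nearest-neighbor coupling
    fAC = 2 * t2 * np.cos(a * ky)                    # A-C nearest-neighbor coupling
    fBC = 4j * t3 * np.sin(a * kx) * np.sin(a * ky)  # purely imaginary NNN B-C coupling
    return np.array([[0,   fAB, fAC],
                     [fAB, 0,   fBC],
                     [fAC, np.conj(fBC), 0]])

def w_pm(kx, ky, t1=1.0, t2=1.0, t3=0.2, a=1.0):
    # Positive branch of Eq. (1).
    return 2 * np.sqrt(t1**2 * np.cos(a*kx)**2 + t2**2 * np.cos(a*ky)**2
                       + 4 * t3**2 * np.sin(a*kx)**2 * np.sin(a*ky)**2)

rng = np.random.default_rng(0)
for kx, ky in rng.uniform(-np.pi/2, np.pi/2, size=(5, 2)):
    ev = np.linalg.eigvalsh(h_bloch(kx, ky))
    w = w_pm(kx, ky)
    assert np.allclose(ev, [-w, 0.0, w], atol=1e-12)  # flat band plus ±w of Eq. (1)
print("Eq. (1) reproduced, including the flat band w0 = 0")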
+ The bands have a linear dependence on the momentum at the four Dirac points that are
+ situated in the first Brillouin zone. Their explicit position depends on the relative strength of
+ t3. In this work, we focus on the most relevant situation where t3 < t1/2 and t3 < t2/2. In that case,
+ the Dirac point is ⃗K = (π/2a, π/2a), see Fig. 2b for illustration. A similar analysis holds for higher
+ values of t3, where the Dirac points are displaced with respect to ⃗K. For a detailed discussion,
+ see [13].
+ Let us calculate the approximate form of the tight-binding Hamiltonian in the vicinity of the
+ Dirac point ⃗K. We denote the effective operator as H(⃗k) ≡ H(⃗K + ⃗k), where |⃗k| is considered
+ small enough so that we can keep terms up to first order in ⃗k. The expansion of H(⃗k)
+ at the Dirac point ⃗K can be conveniently written as
+ H(⃗k) = 2 a t1 kx S1 + 2 a t2 ky S2 + 4 t3 S3.   (2)
+ The matrices
+ S1 = ( 0 1 0 ; 1 0 0 ; 0 0 0 ),   S2 = ( 0 0 1 ; 0 0 0 ; 1 0 0 ),   S3 = ( 0 0 0 ; 0 0 −i ; 0 i 0 ),   (3)
+ form the three-dimensional representation of su(2) algebra, [Sp, Sq] = iεpqrSr, with εpqr the
208
+ three-dimensional anti-symmetric tensor. Therefore, the quasi-particles described by the effec-
209
+ tive Hamiltonian (2) have pseudospin 1.
210
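+ A minimal sketch (parameter values t1, t2, t3, a are illustrative assumptions) of the matrices in Eq. (3)
+ and of the effective Hamiltonian of Eq. (2) is given below; it verifies the su(2) commutation relation
+ and the pseudospin-1 spectrum near the Dirac point.

import numpy as np

S1 = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 0]], dtype=complex)
S2 = np.array([[0, 0, 1], [0, 0, 0], [1, 0, 0]], dtype=complex)
S3 = np.array([[0, 0, 0], [0, 0, -1j], [0, 1j, 0]], dtype=complex)

# su(2) commutation relation: [S1, S2] = i S3 (and cyclic permutations).
assert np.allclose(S1 @ S2 - S2 @ S1, 1j * S3)

def H_eff(kx, ky, t1=1.0, t2=1.0, t3=0.2, a=1.0):
    # Effective pseudospin-1 Hamiltonian near the Dirac point, Eq. (2).
    return 2*a*t1*kx*S1 + 2*a*t2*ky*S2 + 4*t3*S3

# Its eigenvalues are 0 and ±sqrt(m^2 + v1^2 kx^2 + v2^2 ky^2),
# with v1 = 2 a t1, v2 = 2 a t2 and mass m = 4 t3.
print(np.linalg.eigvalsh(H_eff(0.1, -0.05)).round(6))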
+ It is worth noting that, for t3 = 0, the resulting Dirac Hamiltonian in (2) becomes a linear
+ combination of the spin-1 matrices S1 and S2. In such a case, the matrix S̃,
237
+ S̃ = ( −1 0 0 ; 0 1 0 ; 0 0 1 ),   (4)
+ satisfies {S̃, Sj} = 0, with j = 1, 2, and represents the chiral symmetry of H, as there holds
+ {S̃, H|t3=0} = 0. The latter relation implies that the eigenvalues E of H|t3=0 are symmetric
254
+ with respect to E = 0. When an eigenstate ΨE of H has energy E, then there is an eigenstate
255
+ Ψ−E = �SΨE with the energy of the opposite sign.
256
+ 2.1 External electrostatic interaction
258
+ Throughout this manuscript, we consider a piece-wise continuous external electric field dis-
259
+ tributed in the ˆx direction, while we discard any magnetic interaction. The corresponding effec-
260
+ tive Hamiltonian is obtained from (2) through the Peierls transformation [35,36], ⃗k → −iℏ⃗∇ and
261
+ iℏ∂t → iℏ∂t − U(⃗x)I, with I the 3 × 3 identity matrix. Since the Hamiltonian becomes invariant
262
+ on the ˆy direction, the eigenstates can be cast in the form Ψ(x, y) → e±ik2yΨ(x), where Ψ(x)
263
+ solve the following stationary equation:
264
+ H(x)Ψ(x) = (−iℏv1S1∂x + ℏv2kyS2 + mS3 + Ua I)Ψ(x) = EΨ(x),
265
+ (5)
266
+ with Ψ(x) = (ψA(x), ψB(x), ψC(x))T .
267
+ In (5), we have used v1 = 2at1, v2 = 2at2 and m = 4t3 to simplify the notation. This allows
+ us to relate v1 and v2 to the Fermi velocities along the x̂ and ŷ directions, respectively, whereas
269
+ m plays the role of the mass term in the Dirac equation. Furthermore, we have considered
270
+ a constant electrostatic potential, which is valid for our purposes since we are dealing with
271
+ piece-wise continuous interactions.
272
+ From the previous considerations, we may decouple the eigensolution components ψA,B,C as
+ follows:
+ −ℏ²v1² ψ″A + ℏ²v2² ky² ψA = ((E − Ua)² − m²) ψA,   (6)
+ ψB = [−iℏv1(E − Ua) ψ′A + ℏ m v2 ky ψA] / [(E − Ua)² − m²],
+ ψC = [ℏ m v1 ψ′A + ℏ v2 ky (E − Ua) ψA] / [(E − Ua)² − m²],   (7)
+ where the hopping parameters tj, j = 1, 2, 3, enter only through v1, v2 and m.
+ The probability current associated with Ψ can be calculated in standard manner from the
291
+ continuity equation ∂tρ + ⃗∇ · j = 0. Here, ρ = Ψ†Ψ stands for the probability density, and the
292
+ probability current takes the form
+ ⃗j = ( 2v1 Re[ψA* ψB] , 2v2 Re[ψA* ψC] ).   (8)
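+ A minimal helper (a sketch, not from the paper; v1, v2 values are placeholders) implementing the current
+ of Eq. (8) for a spinor ψ = (ψA, ψB, ψC) is shown below.

import numpy as np

def current(psi, v1=1.0, v2=1.0):
    psiA, psiB, psiC = psi
    jx = 2 * v1 * np.real(np.conj(psiA) * psiB)
    jy = 2 * v2 * np.real(np.conj(psiA) * psiC)
    return jx, jy

# A flat-band spinor of the form (m, i v2 ky, -i v1 nu) * chi has psiA real and psiB purely
# imaginary (for real m, ky), so jx vanishes, as discussed later in Sec. 3.
print(current((0.5, 0.3j, -0.2j)))   # -> (0.0, 0.0)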
+ Let us consider briefly the situation when the potential has a finite discontinuity at x = x0.
298
+ It is necessary to specify the behavior of the wave functions at this point. It can be done by
299
+ integrating (5) in the vicinity of x0. Alternatively, one can require the component of the density
+ current perpendicular to the barrier to be continuous. The second approach is more general and
+ covers the boundary conditions provided by the integration as a special case, which read
+ ψA(x0⁻) = ψA(x0⁺),   ψB(x0⁻) = ψB(x0⁺).   (9)
+ It is worth noting that only two of the three eigensolution components are required to be
312
+ continuous in x0, and the third component ψC can have a discontinuity at this point.
313
+ The
314
+ corresponding probability density is not necessarily continuous. This observation was made in
315
+ pseudospin-1 photonic lattices [37]. The boundary conditions obtained in (9) keep the current
316
+ of probability density in the ˆx direction continuous, which is the component perpendicular to
317
+ the discontinuity. As the component ΨC(x) can be discontinuous at x0, the tangent current and
318
+ the probability densities are not necessarily continuous.
319
+ 3 Rectangular electrostatic barrier
+ Let us consider an external electrostatic potential homogeneous along the ŷ direction
+ and piece-wise continuous across the x̂ direction, with
+ U(x) = 0 for |x| > L/2,   U(x) = U0 for |x| ≤ L/2.   (10)
+ We consider, without loss of generality, U0 > 2m. Solutions of the stationary equation are split
+ into three regions, namely, the region I (x < −L/2), region II (−L/2 ≤ x ≤ L/2), and region III
+ (x > L/2). They are written as
+ Ξka = e^{i ky y} e^{i ka x} ( 1 , [−iℏ m v2 ky + ℏ v1 ka (E − Ua)]/[(E − Ua)² − m²] , [iℏ m v1 ka + ℏ v2 ky (E − Ua)]/[(E − Ua)² − m²] )ᵀ,   a = I, II, III,   (11)
+ where UI = UIII = 0, UII = U0, and consequently kI = kIII. We consider ky fixed as a
+ real quantity to obtain plane-wave solutions propagating parallel to the barrier. In turn, ka is
+ considered a complex parameter so that we can distinguish two different regimes (see discussion
+ below).
+ The solution (11) satisfies the eigenvalue equation (5) with the eigenvalue
+ E = Ua ± √[ m² + ℏ²(v1² ka² + v2² ky²) ] = Ua ± √[ m̃² + ℏ² v1² ka² ],   (12)
+ where we have introduced the effective mass term
+ m̃ = √( m² + ℏ² v2² ky² ).   (13)
+ From (11), we distinguish two behaviors, namely, plane-wave solutions for ka ∈ R and
+ evanescent-wave solutions for ka = −i pa, with pa ∈ R. In both cases, the wave functions
+ are associated with real eigenvalues. They are classified as
+ ka ∈ R:   E(ka, ky) = Ua ± √( m̃² + v1² ℏ² ka² ),   E(ka, ky) ∈ (−∞, Ua − m̃) ∪ (Ua + m̃, ∞),   (14)
+ ka = −i pa:   E(pa, ky) = Ua ± √( m̃² − v1² ℏ² pa² ),   E(pa, ky) ∈ (Ua − m̃, Ua + m̃).   (15)
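+ The classification of Eqs. (12)-(15) can be illustrated with the short sketch below (ℏ = 1; the parameter
+ values and sample energies are assumptions used only for illustration): for a given E and ky it returns
+ whether the solution in a region with potential Ua is propagating or evanescent, together with ka or pa.

import numpy as np

def k_in_region(E, ky, Ua, m=0.5, v1=1.0, v2=1.0):
    m_eff = np.sqrt(m**2 + (v2 * ky)**2)       # effective mass of Eq. (13)
    disc = (E - Ua)**2 - m_eff**2
    if disc >= 0:
        return "plane wave", np.sqrt(disc) / v1   # ka of Eq. (12)
    return "evanescent", np.sqrt(-disc) / v1      # pa of Eq. (15)

U0 = 1.5
for E, ky in [(2.0, 0.3), (2.0, 3.5), (0.75, 0.4)]:
    print((E, ky), "region I:", k_in_region(E, ky, 0.0), "region II:", k_in_region(E, ky, U0))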
+ 6
390
+
391
+ (a)
392
+ (b) |E| > m
393
+ (c) |E| < m
394
+ Figure 3: (a) Sketch of the energy surfaces spanned by the dispersion relations (14) (orange)
395
+ and (15) (blue), together with two energy planes located at arbitrary energies |E| > m and
396
+ |E| < m. Panel (b) and panel (c) depict the contour plot generated by the interception between
397
+ the dispersion relations and the energy planes |E| > m and |E| < m, respectively. In panel (b),
398
+ ξ = arctan(ky/kI) denotes the incidence angle of the plane wave and ky;c =
399
+
400
+ E2 − m2/ℏv2 the
401
+ critical value of ky separating the evanescent-wave and plane-wave regimes.
402
+ These dispersion relations span paraboloid and hyperboloid surfaces for plane-wave and evanescent-
403
+ wave solutions, respectively. This behavior is depicted in Fig. 3a for UI = 0 (case a = I).
404
+ For |E| > m, the behavior of the solutions is classified according to the values of ky, with
405
+ ky;c =
406
+
407
+ E2 − m2/ℏv2 being the critical value. That is, for |ky| < ky;c, the solutions are plane-
408
+ wave-like and the momenta kI and ky span an elliptic curve for a fixed energy. For |ky| > ky;c,
409
+ the solutions become evanescent waves and pI and ky span a hyperbolic curve for the same fixed
410
+ energy. This is sketched in Fig. 3b. For |E| < m, no plane-wave solutions exist for ky ∈ R,
411
+ and only evanescent-wave solutions are generated. Here, pI, ky span a rotated hyperbola with
412
+ respect to the case |E| > m, as depicted in Fig. 3c.
413
+ The partial solutions Ξka at the regions I, II, III have to be combined in order to comply
414
+ with the boundary conditions (9) at x0 = ±L and |x| → ∞. The wave function takes the general
415
+ form
416
+ Ξa(x, y) = αaΞ±
417
+ ka(x, y) + βaΞ±
418
+ −ka(x, y),
419
+ a = I, II, III .
420
+ (16)
421
+ The boundary conditions (9) impose the continuity of the two upper components of the wave
422
+ function, from which we find the set of relations between the coefficients αI, βI and αIII, βIII.
423
+ That is,
424
+ M
425
+ �αI
426
+ βI
427
+
428
+ =
429
+ �αIII
430
+ βIII
431
+
432
+ ,
433
+ M =
434
+ �m11
435
+ m12
436
+ m21
437
+ m22
438
+
439
+ ,
440
+ (17)
441
+ with M being the transfer matrix, whose elements mij are functions of E, ky, m and U0. The
442
+ 7
443
+
444
+ El>m
445
+ E(k,k2)
446
+ Ek<m?
447
+ Plane-wave
448
+ <region
449
+ K
450
+ Evanescentwave
451
+ K=ki
452
+ region
453
+ K=iP1(p1,k2)
454
+ k2;c
455
+ (k1,k2)
456
+ -K2:c
457
+ K(p1,k2)
458
+ Klatter are explicitly given by
459
+ m11 = eiLkI
460
+
461
+ cos(LkII) − i sin(LkII)2v2
462
+ 1ℏ2k2
463
+ I k2
464
+ II + ˜m2(k2
465
+ I + k2
466
+ II)
467
+ 2E(E − V )kIkII
468
+
469
+ ,
470
+ (18)
471
+ m22 = e−iLkI
472
+
473
+ cos(LkII) + i sin(LkII)2v2
474
+ 1ℏ2k2
475
+ I k2
476
+ II + ˜m2(k2
477
+ I + k2
478
+ II)
479
+ 2E(E − V )kIkII
480
+
481
+ ,
482
+ (19)
483
+ m12 = isin(LkII)(k2
484
+ yv2
485
+ 2(m2 + ℏ2k2
486
+ yv2
487
+ 2) − v2
488
+ 1(m2 − ℏ2k2
489
+ yv2
490
+ 2)k2
491
+ 1 − 2iv1v2kIkyE)( U0
492
+ 2 − E)
493
+ ℏ2v2
494
+ 1kIkII(E2 − m2)(E − U0)
495
+ ,
496
+ (20)
497
+ where kI =
498
+
499
+ E2 − ˜m2/ℏv1 and kII =
500
+
501
+ (E − V )2 − ˜m2/ℏv1.
502
+ The determinant of the transfer matrix is equal to one, in coherence with conservation of
503
+ the probability current at the boundary. When kI and kII are real, there also holds m11 = m∗
504
+ 22
505
+ and m12 = m∗
506
+ 21. In the next section, we shall use the transfer matrix for determinantion of the
507
+ bound state energies as well as of the scattering characteristics of the plane-wave solutions.
508
+ Additionally to (11), the Lieb lattice supports an additional solution in the form of a flat
509
+ band, which is depicted in Fig. 2. This appears whenever E = Ua, and the eigensolutions cannot
510
+ be determined from Eq. (6)-(7). Instead, one shall solve the Dirac Hamiltonian for E = Ua,
511
+ which leads to the eigensolution Ξfb = (mχ, iℏv2kyχ, −ℏv1χ′)T , where χ = χ(x) is an arbitrary
512
+ complex-valued function. Such an indeterminacy is better understood if one chooses χ such that
513
+ Ξfb(ν, x) = eikyyeiνx
514
+
515
+
516
+ m
517
+ iℏv2ky
518
+ −iℏv1ν
519
+
520
+ � ,
521
+ (21)
522
+ which is a flat band eigensolution for any ν ∈ C. Particularly, for ν ∈ R, Eq. (21) form a set of
523
+ degenerate plane-wave solutions, usually known as degenerate Bloch waves [38] (see also Sec. 2.1
524
+ in [18]). These degenerate waves form a continuous basis that can be used to construct arbitrary
525
+ wavepackets through Fourier transforms. The latter has been exploited to construct the so-called
526
+ compact localizes states [39], which are specific linear combinations of degenerate waves localized
527
+ in each unitary cell of a finite-dimensional lattice. See [18, 40] for a more extensive discussion
528
+ on the matter.
529
+ It is clear that degenerate Bloch waves do not carry current on the x-direction, as jx =
530
+ 2v1 Re ψ∗
531
+ AψB vanishes for any ν ∈ C.
532
+ This also holds for any linear combination (finite or
533
+ infinite) of degenerate Bloch state. Thus, the current states belonging to the flat band energy
534
+ are current-free states.
535
+ Notice that the dispersion and flat bands have a touching point only for m = 0 (See Fig. 2a),
536
+ and thus one can explore the behavior of the solutions on the dispersion band when they approach
537
+ the flat band interception. It is straightforward to realize that Ξka,ky,m→0 leads to the null vector,
538
+ which is only one of the infinitely many solutions inside the flat band. For this reason, we shall
539
+ discuss the flat band and the dispersion bands separately.
540
+ 4
541
+ Electron confinement
542
+ Let us explore the possibility of bound states trapped by the electrostatic potential (10). Here,
543
+ we look for eigenvalues E so that the corresponding eigensolutions have finite norm in L2 ⊗ C3,
544
+ 8
545
+
546
+ (a) ky = 0
547
+ (b)
548
+ (c)
549
+ Figure 4: Sketch for the energy configuration associated with (10) for ky = 0 (a) and increasing
550
+ values of ky (b)-(c). The diagonal-pattern and color-shaded regions denote the area covered by
551
+ mass term m and effective mass term �m, respectively. In the panel (b), the energy (red-dashed
552
+ line) inside the region II lies out of the effective mass term (plane-wave solution), whereas in
553
+ the panel (c) they lie inside the effective mass term (evanescent-wave solution).
554
+ which implies that eigensolutions must decay asymptotically to zero in the regions I and III for
555
+ x → −∞ and x → ∞, respectively.
556
+ Following (11), we thus use evanescent-wave solutions for the regions I and III. By fixing
557
+ kI = kIII = ipI,
558
+ pI > 0,
559
+ (22)
560
+ one restricts the energies into the interval E ∈ (− �m, �m), as depicted in all the cases of Fig. 4.
561
+ The wave function composed from (16) has an exponentially vanishing behavior for |x| → ∞.
562
+ This implies that we fix αI = 0, βI = 1 and βIII = 0, and the relation (17) turns into
563
+ m12 = αIII,
564
+ m22 = 0.
565
+ (23)
566
+ The first relation determines the amplitude of the wave function in the region III, whereas
567
+ the second relation fixes the energies for the bound states. This can be written, after some
568
+ simplifications, in the following form:
569
+ tanh
570
+ ��
571
+ �m2 − (E − U0)2
572
+ ℏv1
573
+ L
574
+
575
+ = −E(E − U0)
576
+
577
+ �m2 − (E − U0)2√
578
+ �m2 − E2
579
+ (E − U0)2( �m2 − E2) + �m2U0
580
+
581
+ E − U0
582
+ 2
583
+ � .
584
+ (24)
585
+ The wave function ΞII in the intermediate region II can be either oscillatory for (E − U0)2 >
586
+ ˜m2 (we can set kII > 0 without loss of generality) or evanescent for (E − U0)2 < ˜m2 (kII = i pII,
587
+ pII > 0), see Fig. 4b and Fig. 4c, respectively.
588
+ The transcendental equation (24) allows us
589
+ determining the bound state energies as a function of ky for both cases.
590
+ Although the explicit solution E = E(ky) of (24) has to be found numerically, some pre-
591
+ liminary information can be extracted by considering large values ℏv2ky ≫ U0, m in the tran-
592
+ scendental equation (24). Here, �m ≈ ℏv2ky and the dispersion relation reduces to E2 ≈ E2
593
+ ∞ =
594
+ ℏ2(−v2
595
+ 1p2
596
+ I + v2
597
+ 2k2
598
+ y). Since pI should be a real quantity in order to remain in the evanescent-wave
599
+ regime in the regions I and III, we find that ℏv2|ky| ≥ E(ky) holds for asymptotic values of
600
+ ℏv2ky. The behavior of E(ky) is thus bounded for ℏv2ky → ∞ and can be classified into the
601
+ following in three asymptotic cases:
602
+ 9
603
+
604
+ Uo+m
605
+ Uo+m
606
+ Uo
607
+ m
608
+ Uo-m
609
+ 10%m/7/7
610
+ 0
611
+ -m
612
+ 1
613
+ II
614
+ III
615
+ -mJo+m
616
+ Uo
617
+ Uo-m
618
+ m
619
+ E
620
+ 0
621
+ -m
622
+ II
623
+ IIIUo+m
624
+ Uo
625
+ Uo-m
626
+ Uo-m
627
+ m
628
+ m
629
+ E
630
+ 0
631
+ -m
632
+ -m
633
+ II
634
+ III(a)
635
+ (b)
636
+ Figure 5: (In units of ℏ=1) (a) Bound state energies E(ky), computed from (24), as a function
637
+ of the transverse momentum ky for v1 = v2 = L = 1, m = 0.5, and U0 = 1.5. The blue-solid and
638
+ red-dashed curves indicate bound state energies for arbitrary ky, whereas green-dot-dashed and
639
+ black-dotted curves are energies emerging from a specific ky ̸= 0. The shaded area marks the
640
+ scattering-state energy region. (b) Current parallel to the barrier Jy = ∂E(ky)/∂ky associated
641
+ with the dispersion relations in (a).
642
+ • First, a valid asymptotic behavior may be of the form E(ky → ∞) → C < ∞. Substituting
643
+ the latter into (24) leads to a unique solution of the form E(ky → ∞) → C = U0/2.
644
+ • Another possible asymptotic behavior is |E(ky)| = ℏv2|ky|, which vanishes both sides
645
+ of (24). That is, |E(ky)| = ℏv2|ky| is a valid asymptotic behavior.
646
+ • The last possible asymptotic behavior is |E(ky)| < ℏv2|ky|, which leads to a contradiction
647
+ once substituted into (24). That is, such an asymptotic behavior is do not generate bound
648
+ state solutions.
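+ For completeness, a numerical sketch of how the ky = 0 bound-state energies can be obtained is given
+ below. It assumes ℏ = 1, uses the even/odd conditions quoted further below in Eq. (25), and adopts the
+ illustrative parameters m = 0.5, U0 = 1.5, L = v1 = 1 used in the example that follows; scipy is assumed
+ to be available.

import numpy as np
from scipy.optimize import brentq

m, U0, L, v1 = 0.5, 1.5, 1.0, 1.0

def kII(E):
    return np.sqrt((E - U0)**2 - m**2) / v1

def F(E):
    return E / (U0 - E) * np.sqrt(((E - U0)**2 - m**2) / (m**2 - E**2))

even = lambda E: np.tan(kII(E) * L / 2) - F(E)        # tan(kII L / 2) = F(E)
odd  = lambda E: -1.0 / np.tan(kII(E) * L / 2) - F(E)  # -cot(kII L / 2) = F(E)

def roots(f, a=-m + 1e-6, b=m - 1e-6, n=2000):
    # Bound states live in E in (-m, m); scan for sign changes and refine with brentq.
    E = np.linspace(a, b, n)
    out = []
    for x0, x1 in zip(E[:-1], E[1:]):
        y0, y1 = f(x0), f(x1)
        if np.isfinite(y0) and np.isfinite(y1) and y0 * y1 < 0:
            out.append(brentq(f, x0, x1))
    return out

print("even:", roots(even))   # ~ [0.2814], matching the value quoted in the text
print("odd: ", roots(odd))    # ~ [-0.3265]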
649
+ We thus conclude that the eigenvalues associated with bound states, if they exist, either
650
+ converge asymptotically to U0/2 or ℏv2|ky|. Since the current density on the direction parallel
651
+ to the barrier is Jy = ∂E(ky)/∂ky ≡
652
+
653
+ R j(x, y)dx (see [41] or Appendix E in [42]), it converges
654
+ either to zero or ±ℏv2 for ky → ∞.
655
+ As an illustrative example, let us consider numerical values such that we have homogeneous
656
+ Fermi velocities v1 = v2 = 1, a mass term m = 0.5, together with a rectangular potential well
657
+ with L/ℏ = 1 and U0 = 1.5. Numerical solutions of (24) reveal the existence of two bound
658
+ states for ky = 02, and new bound states appear for increasing values of ky. This is depicted in
659
+ Fig. 5a, where one may see that energies indeed converge to either U0
660
+ 2 = 0.75 or become linear
661
+ in ℏv2ky for large enough ky. Likewise, we depict in Fig. 5b the corresponding current density
662
+ parallel to the barrier (Jy), which becomes finite or null for asymptotic ky, as predicted from
663
+ our former analysis.
664
+ • Further information is available for direct incidence, that is, ky = 0, �m = m. Here, the
665
+ effective Hamiltonian possesses the additional symmetry represented [H, Px �S] = 0, with Px is
666
+ the parity operator and �S defined in (4). This allows establishing a parity-symmetric criteria for
667
+ the wave function �Ξ with respect to Px �S, namely, we classify the solutions fulfilling the condition
668
+ Px �SΞ = ±Ξ as even (Ξ(e) for +) and odd (Ξ(e) for −). In this form, the coefficients of ΞII
669
+ 2This result agrees with the analytic formula presented in (26).
670
+ 10
671
+
672
+ 2
673
+ 0
674
+ -4
675
+ -2
676
+ 2
677
+ 4
678
+ k2(k2
679
+ -8
680
+ -4
681
+ 4
682
+ 8
683
+ K(a) U0 = 1.5
684
+ (b)
685
+ (c)
686
+ Figure 6: (In units of ℏ=1) (a) Number of even (dotted) and odd (blue-thick) bound states
687
+ as a function of L for v1 = 1, m = 0.5 and U0 = 1.5. (b) Eigensolution component ψC and
688
+ (E − U(x))ψC for L = v1 = 1 and U0 = 1.5 and the even bound state energy E ≈ 0.281398.
689
+ (c) Probability distribution associated with the eigenvalues E ≈ 0.281398 (blue-solid) and E ≈
690
+ −0.32653 (red-dashing) and the same parameters as in (b).
691
+ in (16) are αII = ±βII for even (+) and odd (−) functions, so that after evaluating the boundary
692
+ condition at x = L one obtains relations to determine the energies of even and odd states as
693
+ tan
694
+ �kIIL
695
+ 2
696
+
697
+ = F(E),
698
+ − cot
699
+ �kIIL
700
+ 2
701
+
702
+ = F(E),
703
+ F(E) =
704
+ E
705
+ U0 − E
706
+
707
+ (E − U0)2 − m2
708
+ m2 − E2
709
+ ,
710
+ (25)
711
+ respectively, with kII =
712
+
713
+ (E − U0)2 − m2/ℏv1.
714
+ Although the exact values of E cannot be analytically determined for arbitrary L, one can
715
+ still determine the exact number of even (N(e)) and odd (N(o)) bound states. The thorough
716
+ analysis (see App. A for a detailed proof) leads to
717
+ N(e) =
718
+
719
+ L
720
+ πℏv1
721
+
722
+ U0
723
+ 2
724
+
725
+ m + U0
726
+ 2
727
+
728
+ + 1
729
+ 2
730
+
731
+
732
+
733
+ L
734
+ πℏv1
735
+
736
+ U0
737
+ 2
738
+
739
+ −m + U0
740
+ 2
741
+
742
+ + 1
743
+ 2
744
+
745
+ + 1,
746
+ N(o) =
747
+
748
+ L
749
+ πℏv1
750
+
751
+ U0
752
+ 2
753
+
754
+ m + U0
755
+ 2
756
+ ��
757
+
758
+
759
+ L
760
+ πℏv1
761
+
762
+ U0
763
+ 2
764
+
765
+ −m + U0
766
+ 2
767
+ ��
768
+ + 1,
769
+ (26)
770
+ with ⌊·⌋ the floor function.
771
+ From the latter, it is clear that at least one even and one odd bound state always exist,
772
+ regardless of the potential width and strength.
773
+ Particularly, for small enough L → 0, one
774
+ obtains the E → 0 and odd E → −m as the even and odd bound state energies, respectively.
775
+ Since the floor function is discontinuous, the number of bound states does not necessarily
776
+ grow continuously for increasing values of L. That is, for L = L0 with N(e,o) bound states, there
777
+ might be a L = L1 > L0 such that (N(e,o) − 1) are generated. This is indeed depicted in Fig. 6a
778
+ for fixed potential depth and different potential length L.
779
+ As discussed in Sec. 2.1, the component ψC might not be continuous, which can lead to
780
+ discontinuous probability densities. Still, one may verify the validity of the bound state eigen-
781
+ values E obtained from (25) by substituting it into (E −U(x))ψC, which should be a continuous
782
+ function3. Particularly, from Fig. 6a, one notices that L = 1 and U0 = 1.5 lead to one even
783
+ 3It follows from kyΨB + (U(x) − E)ΨC = 0, which the third of the coupled equations represented by (5).
784
+ 11
785
+
786
+ 4
787
+ N(e)
788
+ N(o)
789
+ 3
790
+ 2
791
+ 1
792
+ 5
793
+ 13
794
+ 17
795
+ L0.4
796
+ c
797
+ (E-U(x)Wc
798
+ 0
799
+ -0.4
800
+ -2
801
+ -1
802
+ 1
803
+ 2
804
+ X0.4
805
+ 0.2
806
+ 0
807
+ -2
808
+ -1
809
+ 1
810
+ 2
811
+ X(E(e)
812
+ 0
813
+ ≈ 0.281398) and one odd (E(o)
814
+ 0
815
+ ≈ −0.32653) bound state energy eigenvalue. The compo-
816
+ nent ψC and (E −U(x))ψC are depicted in Fig. 6b for E ≈ 0.281398, which verifies the required
817
+ continuity condition for the latter function. The same conclusion is drawn for E ≈ −0.32653.
818
+ Furthermore, the corresponding probability distributions associated with �Ψ
819
+ (e) and �Ψ
820
+ (o) are de-
821
+ picted in Fig. 6c in blue-solid and red-dashed, respectively, which are discontinuous.
822
+ 5
823
+ Scattering states and transmission amplitudes
824
+ Let us now focus on the scattering of the plane waves on the barrier and the related phenomena.
825
+ This is obtained when plane-wave-like solutions are present in the regions I and III, which
826
+ corresponds to the eigenvalues E ∈ (−∞, − �m)∪( �m, ∞). Without loss of generality, we consider
827
+ only outgoing waves in region III and outgoing together with incoming waves in region I. The
828
+ coefficients of the wave function (16) are then fixed in the following manner,
829
+ αI = 1,
830
+ βI = r,
831
+ αIII = t,
832
+ βIII = 0.
833
+ (27)
834
+ The complex constants t and r can be calculated from (17) as
835
+ t =
836
+ 1
837
+ m22
838
+ ,
839
+ r = −m21
840
+ m22
841
+ .
842
+ (28)
843
+ The coefficients r and t define the reflection and transmission coefficients R = |r|2 and T = |t|2
844
+ that satisfy R + T = 1. The later expression can be directly verified by substituting from (28)
845
+ when taking into account that there holds m11 = m∗
846
+ 22 and m12 = m∗
847
+ 21. After some calculations,
848
+ one obtains,
849
+ r = sin(kIIL)
850
+ −2A(B − B′) + i
851
+
852
+ A′2 − A2 + (B − B′)2�
853
+ 2AA′ cos(kIIL) − i sin(kIIL) ((B − B′)2 + A2 + A′2),
854
+ (29)
855
+ where
856
+ A
857
+ v1
858
+ =
859
+ EkI
860
+ E2 − m2 ,
861
+ A′
862
+ v1
863
+ =
864
+ (E − U0)kII
865
+ (E − U0)2 − m2 ,
866
+ B
867
+ v2
868
+ =
869
+ mky
870
+ E2 − m2 ,
871
+ B′
872
+ v2
873
+ =
874
+ mky
875
+ (E − U0)2 − m2 ,
876
+ (30)
877
+ and kI =
878
+
879
+ E2− �m2
880
+ ℏv1
881
+ , kII =
882
+
883
+ (E−U0)2− �m2
884
+ ℏv1
885
+ . This expression also holds in cases where solutions in
886
+ the region II are evanescent waves.
887
+ Eq. (29) is a handy expression to understand the transmission of incoming waves from the
888
+ region I and traveling to the region III. Particular interest is paid to cases in which perfect
889
+ tunneling exists, T = 1. Such a tunneling is obtained whenever r = 0, which ensures that t
890
+ is a unimodular complex number. In this case, the incident and transmitted waves share their
891
+ amplitude, but the later carries a relative phase shift t as a leftover of its interaction with the
892
+ barrier. For the sake of clarity, we split our discussion in two cases.
893
+ Normal incidence (ky = 0)
894
+ In this case, the reflection coefficient becomes simpler since B = B′ = 0, kI =
895
+
896
+ E2 − m2/ℏv1,
897
+ and kII =
898
+
899
+ (E − U0)2 − m2/ℏv1. The numerator in r becomes proportional to m sin(kIIL).
900
+ 12
901
+
902
+ Therefore, for the gapless lattice setup (m = 0), perfect tunneling occurs for any arbitrary ener-
903
+ gies in E ∈ (−∞, −m)∪(m, ∞). This effect was reported in graphene [3,4,43] and pseudospin-1
904
+ lattices [11,44].
905
+ For m ̸= 0, perfect tunneling does exist for specific energies so that kIIL = nπ, with n = 1, . . ..
906
+ The exact resonant energies are straightforward to compute and are presented in a much general
907
+ case below. However, it is worth to analyze the behavior of T = 1 − |r|2 when the barrier is
908
+ large enough, U0 ≫ m, E, for fixed and finite E. The straightforward calculations show that
909
+ T ≈
910
+ 1
911
+ 1 +
912
+ m4
913
+ 4E2(E2−m2) sin2 �
914
+ U0L
915
+ ℏv1
916
+ �.
917
+ (31)
918
+ It reveals that despite the lack of the perfect tunneling, the transmission converges to a non-null
919
+ value as the electrostatic barrier increases indefinitely. This is known as Klein paradox [45], and
920
+ it is in sharp contrast with the non-relativistic case, where transmission becomes smaller for
921
+ larger barrier heights.
922
+ Oblique incidence (ky ̸= 0)
923
+ • Super-Klein tunneling When B = B′ and A = ±A′ in from (29), the reflection coefficient
924
+ vanishes and the transmission becomes perfect (T = 1). This is achieved when E = U0/2. One
925
+ thus has perfect tunneling regardless of the incidence angle for E = U0/2. This phenomenon is
926
+ called the super-Klein tunneling, already reported for pseudospin-1 lattice models with gapless
927
+ dispersion and flat bands [37,44,46], as well as in pseudospin-1/2 graphene lattices [47]. Here,
928
+ we note that the presence of the mass term (m ̸= 0) does not break the super-Klein tunneling as
929
+ long as U0 > 2m. However, super-Klein tunneling is altogether lost by tuning the electrostatic
930
+ barrier such that 0 < U0 < 2m, as no plane-wave solutions exist for E = U0/2. This highlights
931
+ the effects of the mass term (band-gap) on the transmission properties.
932
+ • Generalized Snell-Descartes law It is convenient to define the two-dimensional momentum
933
+ vectors ⃗k = (kI, ky) and ⃗k′ = (kII, ky) that characterize the incident wave and the wave traveling
934
+ through the electric barrier, respectively. The incident and transmitted angles are defined as
935
+ ξ = arctan(ky/kI) and ξ′ = arctan(ky/kII), respectively, see Fig. 7a. Contrary to the bound
936
+ state case of Sec. 4, plane-wave solutions only exist in the region I for bounded values of ky, i.e.,
937
+ |ky| < ky;c =
938
+
939
+ E2 − m2/ℏv2. This alternatively implies that scattering phenomenon is available
940
+ for restricted values of the effective-mass term �m. This is depicted in Fig. 7b, from which it is
941
+ also clear that, for |ky| > ky;c, the shaded are covered by the effective-mass region overlaps with
942
+ the energy E, leading to evanescent-wave solutions in the region I.
943
+ From the dispersion relations in the regions I and II, together with the fact that ky is constant
944
+ across all regions, one can establish a relation between the incident and transmitted angles ξ
945
+ and ξ′ of Fig. 7a,
946
+ tan ξ′
947
+ tan ξ
948
+
949
+ v2
950
+ 1 + v2
951
+ 2 tan2 ξ
952
+ v2
953
+ 1 + v2
954
+ 2 tan2 ξ′ =
955
+
956
+ E2 − m2
957
+ (E − U0)2 − m2 .
958
+ (32)
959
+ For v1 = v2, one recovers the same Snell-Descartes law previously reported for graphene [4], and
960
+ to the Snell’s law obtained for pseudospin-1 lattices with m = 0 reported in [46].
961
+ Since we are considering U0 > 2m, we get the following information about the transmitted
962
+ angle:
963
+ 13
964
+
965
+ (a)
966
+ (b)
967
+ (c)
968
+ Figure 7: (b) Scattering configuration (upper-view) for an incident wave ⃗k (region I), with inci-
969
+ dent angle ξ, traveling through an electrostatic barrier (green-shaded area). The wave refracts
970
+ into region II as a wave with vector ⃗k′ and transmitted angle ξ′. (b) Energy configuration of
971
+ the panel (a) with an incident wave with energy E > �m (red-dashed line). (c) Energy curves
972
+ spanned by κ, ky (region I and III) and κ′, ky (region II) for E > �m fixed as in panel (b).
973
+ • For E ∈
974
+
975
+ m, U0
976
+ 2
977
+
978
+ , there exists a transmitted angle ξ′ for every incident angle ξ ∈ (−π/2, π/2).
979
+ • For E = U0
980
+ 2 , the transmitted and incident angles are equal, ξ′ = ξ.
981
+ • For E ∈
982
+ � U0
983
+ 2 , U0 − m
984
+
985
+ ∪ (U0 + m, ∞), there are transmitted angles ξ′ ∈ (−π/2, π/2) only
986
+ for ξ ∈ (−ξc, ξc), with the critical angle tan2 ξc = v2
987
+ 1
988
+ v2
989
+ 2
990
+ (E−U0)2−m2
991
+ 2U0
992
+
993
+ E− U0
994
+ 2
995
+ � . For other values of ξ, the
996
+ solutions in the region II are evanescent waves.
997
+ • For E ∈ (U0 − m, U0 + m), there are only evanescent waves in the region II.
998
+ • Fabry-P´erot resonances Perfect transmission occurs for other energies as well, nevertheless,
999
+ it gets angle dependent. The reflection coefficient (29) vanishes for kIIL = nπ, with n ∈ Z+.
1000
+ Since kII is in turn a function of the incidence angle ξ, once may conclude that perfect reflection
1001
+ appears only for some specific incidence angles. These are usually known as tunneling resonances
1002
+ or Fabry-P´erot resonances [4], and are given as a function of the incident angles ξ as
1003
+ E(res)
1004
+ ±;n =
1005
+
1006
+ 1 + v2
1007
+ 2
1008
+ v2
1009
+ 1
1010
+ tan2 ξ
1011
+
1012
+
1013
+
1014
+ �U0 ±
1015
+
1016
+
1017
+
1018
+
1019
+ �U 2
1020
+ 0 −
1021
+ 1
1022
+ 1 + v2
1023
+ 2
1024
+ v2
1025
+ 1 tan2 ξ
1026
+
1027
+ �U 2
1028
+ 0 − π2v2
1029
+ 1(n + 1)2
1030
+ L2
1031
+
1032
+ m2
1033
+ 1 + v2
1034
+ 2
1035
+ v2
1036
+ 1 tan2 ξ
1037
+
1038
+
1039
+
1040
+
1041
+ � ,
1042
+ (33)
1043
+ with n = 0, 1, . . ..
1044
+ These resonant energies behave asymptotically as limξ→±π/2 E(res)
1045
+ +;n → ∞ and limξ→±π/2 E(res)
1046
+ −;n →
1047
+ (2U0)−1 �
1048
+ U 2
1049
+ 0 − π2 v2
1050
+ 1(n+1)2
1051
+ L2
1052
+
1053
+ . Thus, for almost perpendicular incident waves (ξ ∼ ±π/2), one re-
1054
+ quires larger and larger energies in order to recover the resonances at E(res)
1055
+ +;n , whereas finite and
1056
+ well-defined energy values are required for the resonances E(res)
1057
+ −;n . This behavior is depicted in
1058
+ Fig. 8a.
1059
+ 14
1060
+
1061
+ I
1062
+ I1
1063
+ III
1064
+ k
1065
+ Ki
1066
+ k
1067
+ 3
1068
+ 1KUo+m
1069
+ Uo+m
1070
+ Uo
1071
+ Uo-m
1072
+ Uo-m
1073
+ E
1074
+ m
1075
+ m
1076
+ 0
1077
+ -m
1078
+ -m
1079
+ II
1080
+ IIIRegion
1081
+ Region
1082
+ 1
1083
+ II
1084
+ K(a)
1085
+ Figure 8: (Units of ℏ = 1) (a) Tunneling resonance energies E+;n (blue-solid) and E−;n (orange-
1086
+ dashed) as a function of ξ(−π/2, π/2). The inset depicts the transmission amplitude T as a
1087
+ function of E for ξ = 0. The shaded area denotes the region where the duple (ξ, E) produces
1088
+ evanescent waves in the region I. The parameters have been fixed as v1 = v2 = 1, m = 0.5 and
1089
+ U0 = 1.5.
1090
+ 6
1091
+ Remarks on the flat-band solutions
1092
+ The piece-wise continuous nature of the electrostatic interaction (10) allows the generation of
1093
+ two flat band energies, one located at E = U0 for the region II, and another one at E = 0 for
1094
+ the regions I and III. Although the boundary conditions are the same in both cases, the allowed
1095
+ matching solutions have a different behavior.
1096
+ Let us first consider E = U0 and ky so that plane-wave solutions exist for the regions I and
1097
+ III. For generality, we consider incoming and outgoing plane waves in regions I and III, and a
1098
+ general flat-band solution in II. Here, the waves entering the interaction zone from the left and
1099
+ right have an amplitude I1 and I2, respectively, with I1,2 ∈ R. Additionally, we fix I2
1100
+ 1 + I2
1101
+ 2 = 1.
1102
+ Under these considerations, we have the general solutions
1103
+ Ξ =
1104
+
1105
+
1106
+
1107
+
1108
+
1109
+ I1ΞkI + A1Ξ−kI
1110
+ x < −L/2
1111
+ Ξfb
1112
+ |x| < L/2
1113
+ I2ΞkI + A2Ξ−kI
1114
+ x > L/2
1115
+ (34)
1116
+ where A1,2 ∈ C, Ξ = (mχ, iℏv2kyχ, −iℏv1χ′)T , with χ a complex-valued function, and Ξ±kI the
1117
+ solutions (11) evaluated at E = U0. By imposing the boundary conditions (9), one obtains the
1118
+ relations A1 = I1e−2iφeikIL and A2 = I2e2iφe−ikIL, with φ = arctan(v2kyU0/mv1kI), whereas the
1119
+ arbitrary function χ is restricted to fulfill the following relations at the boundaries,
1120
+ χ
1121
+
1122
+ − L
1123
+ 2
1124
+
1125
+ =
1126
+ 2v1kII1e−i(φ−kI L)
1127
+
1128
+ m2v1k2
1129
+ I + v2k2yU 2
1130
+ 0
1131
+ ,
1132
+ χ
1133
+ � L
1134
+ 2
1135
+
1136
+ = −
1137
+ 2v1kII2ei(φ−kI L)
1138
+
1139
+ m2v1k2
1140
+ I + v2k2yU 2
1141
+ 0
1142
+ .
1143
+ (35)
1144
+ Given the arbitrary nature of χ, one may alternatively rewrite it as χ = 2v1kIei(φ−kI L)2x/L
1145
+
1146
+ m2v1k2
1147
+ I +v2k2yU2
1148
+ 0 �χ, where
1149
+ �χ(−L/2) = I1 and �χ(L/2) = −I2.
1150
+ 15
1151
+
1152
+ 20
1153
+ 10
1154
+ -20
1155
+ E
1156
+ 20
1157
+ E(res)
1158
+ n
1159
+ -10
1160
+ -20
1161
+
1162
+ 3
1163
+ 2
1164
+ 4
1165
+ 4
1166
+ 2Thus, the coupling of incident waves to the flat-band solution leads to a scattering problem
1167
+ in which the waves entering the interaction region are completely reflected inside their respective
1168
+ regions. Still, the ��at band solutions allowed during such a process must fulfill the boundary
1169
+ conditions (35). Note that one also has the conservation property |A1|2 + |A2|2 = I2
1170
+ 1 + I2
1171
+ 2 = 1.
1172
+ The latter results hold whenever waves enter from only one region, say I1 = 1 and I2 = 0. In
1173
+ such a case, we have a perfect reflection in region I, up to a phase in the reflected wave.
1174
+ Flat-band solutions also occur for E = 0 in regions I and III. The arbitrary nature of the
1175
+ solutions in those flat bands can be tuned so that finite-norm solutions appear. The correspond-
1176
+ ing wave function in region II can be found using the boundary conditions, and the calculations
1177
+ are as straightforward as the scattering case presented above.
1178
+ 7
1179
+ Concluding remarks
1180
+ In this manuscript, it was shown that the existence of a rectangular electrostatic barrier always
1181
+ produces at least two bound states for ky = 0, and generates more bound states at different
1182
+ energies for increasing values of ky. Interestingly, it was found that even for the asymptotic
1183
+ values ℏv2ky → ∞, the associated current density parallel to the barrier is bounded by ±ℏv2,
1184
+ where v2 = 2at2. Thus, the current is linear on the hopping amplitude across the ˆy-direction,
1185
+ as expected.
1186
+ It is worth remarking that dispersion relations obtained from (24) identify the energies for
1187
+ which electrons localize in the x-direction while propagation along the ŷ-direction is still
+ possible. However, by exploiting the separability of free-particle solutions, one can always con-
1189
+ struct linear combinations so that electrons localize in the ŷ-direction as well. Such a procedure
1190
+ has been discussed in [48] for graphene. For instance, in the example provided in Fig. 5a, one
1191
+ can take the energies associated with blue-solid and red-dashed curves as they exist for any
1192
+ ky ∈ R. From the relations (26), one can ensure that at least two of such dispersion relations
1193
+ always exist. Additional caution must be taken for the other dispersion relations, as they only
1194
+ exist for intervals ky ∈ S ⊆ R, and the linear combination must be constructed according to
+ that interval. Devising such wave packets is a task beyond the scope of the current work and will
+ be discussed elsewhere, as it deserves attention by itself.
1197
+ On the one hand, for the scattering-wave regime, we have proved that even in the gapped
1198
+ case (m ≠ 0), the Lieb lattice supports super-Klein tunneling for an energy equal to half of
1199
+ the electric barrier, E = U0/2, provided that U0 > 2m. For the gapless case, we recover the
1200
+ same results previously reported for gapless T3 lattices [44] and ultra-cold atoms trapped in
1201
+ optical lattices [24]. On the other hand, we identified a new modified Snell-like law valid for
1202
+ anisotropic Fermi velocities v1 ≠ v2. The latter allows us to identify the Fabry-Pérot resonant
1203
+ transmission, which defines a relation between the incident energy and the incident-wave angle
1204
+ required to produce perfect tunneling up to a phase factor. Interestingly, for negative energies,
1205
+ perfect transmission is achievable for finite energies at incident waves almost perpendicular to
1206
+ the barrier. This is not the case for positive energies, as it is shown that the required energies
1207
+ diverge.
1208
+ The existence of flat-band solutions poses an additional case not available in graphene lattices.
1209
+ These solutions can be coupled to incident waves and their transmission properties determined,
+ which in this case leads to perfectly reflected waves. Since the flat-band solutions are defined
1211
+ 16
1212
+
1213
+ in terms of degenerate Bloch waves, there is an infinite family of solutions that allows such a
1214
+ reflection, as long as they fulfill the boundary condition (35).
1215
+ Acknowledgments
1216
+ K.Z. acknowledges the support from the project “Physicists on the move II” (KINÉO II)
1217
+ funded by the Ministry of Education, Youth, and Sports of the Czech Republic, Grant No.
1218
+ CZ.02.2.69/0.0/0.0/18 053/0017163.
1219
+ A Determining the number of even and odd bound states
+ In this appendix, we present the derivation of the number of even bound states given
+ in (26). The procedure applies straightforwardly to the odd case as well. It is
1223
+ convenient to define the intervals
1224
+ $I_0 = \left(0, \tfrac{\pi}{2}\right), \qquad I_n = \left(\tfrac{\pi}{2} + (n-1)\pi,\ \tfrac{\pi}{2} + n\pi\right), \qquad n = 1, 2, \ldots,$          (A-1)
+ so that tan(x) is nonsingular for $x \in I_n$. Furthermore, if $x \in \cup_{k=p_1}^{p_2} I_k$, then tan(x) has $(p_2 - p_1)$
+ singularities.
1241
+ To determine the number of even bound states, one must find the number of intersections
+ of F(E) in (25) with the periodic function $\tan(k_{II}L/2)$ in the interval E ∈ (−m, m). To this end,
+ one may notice that F(E) is a monotonically increasing function of E ∈ (−m, m) that tends to
+ ∓∞ for E → ∓m and vanishes for E = 0. This means that F(E) defines the bijection
+ F(E) : (−m, m) → R. On the other hand, $\partial k_{II}/\partial E < 0$ for E ∈ (−m, m), and one thus
+ concludes that $\tan(k_{II}L/2)$ (and also $-\cot(k_{II}L/2)$) is a monotonically decreasing function of E
+ in each of the intervals $k_{II}L/2 \in I_n$, with n = 0, 1, .... Combining this property with the fact that
+ F(E) : (−m, m) → R is monotonically increasing, one concludes that intersections of
+ both functions always exist; it remains to determine their exact number.
1254
+ By exploiting the fact that tan(kII L/2) is a periodic function, one just needs to count the
1255
+ number of periods inside the interval E ∈ (−m, m) for arbitrary U0 and L, which is equal to the
1256
+ number of singularities plus one. m is a lattice parameter, so it is assumed to be a fixed value.
1257
+ Let $\sigma_\pm := \left.\tfrac{k_{II}L}{2}\right|_{E=\mp m} = \tfrac{L}{\hbar v_1}\sqrt{\tfrac{U_0}{2}\left(\pm m + \tfrac{U_0}{2}\right)}$, so that the domain of $\tan\!\left(\tfrac{k_{II}L}{2}\right)$ lies in the interval
+ $(\sigma_-, \sigma_+)$ for E ∈ (−m, m).
+ Now, if $\sigma_- \in I_{r_1}$ and $\sigma_+ \in I_{r_2}$, with $r_2 > r_1$ and $r_{1,2} = 0, 1, \ldots$, then $\tan(k_{II}L/2)$ has
+ $r_2 - r_1$ singularities for E ∈ (−m, m) and intersects F(E) exactly $(r_2 - r_1 + 1)$ times. That is,
+ $N^{(e)} = r_2 - r_1 + 1$. The values of $r_{1,2}$ are found by exploiting the fact that $\lfloor \tfrac{x}{\pi} + \tfrac{1}{2} \rfloor = r$ for
+ $x \in I_r$. One thus has $\lfloor \tfrac{\sigma_-}{\pi} + \tfrac{1}{2} \rfloor = r_1$ and $\lfloor \tfrac{\sigma_+}{\pi} + \tfrac{1}{2} \rfloor = r_2$, which leads to the expression presented
+ in (26).
1286
+ The same procedure applies to the odd solutions, where we define the intervals $\widetilde{I}_n = (n\pi, (n+1)\pi)$,
+ with n = 0, 1, ..., so that cot(x) is nonsingular for $x \in \widetilde{I}_n$. Since $-\cot(k_{II}L/2)$ is
+ monotonically decreasing for $k_{II}L/2 \in \widetilde{I}_n$, the same reasoning used in the even case applies
+ to the odd case, and one obtains $N^{(o)}$ in (26).
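+ For concreteness, a minimal Python sketch of this counting argument (the expressions for σ± and the floor-function identities are taken from the derivation above; the parameter values are arbitrary and purely illustrative):
+ import numpy as np
+
+ # Arbitrary illustrative parameters (hbar*v1, m, U0, L in consistent units)
+ hbar_v1 = 1.0
+ m, U0, L = 0.5, 4.0, 3.0
+
+ def sigma(sign):
+     # sigma_± = (L / (hbar v1)) * sqrt( (U0/2) * (±m + U0/2) )
+     return (L / hbar_v1) * np.sqrt((U0 / 2.0) * (sign * m + U0 / 2.0))
+
+ sig_minus, sig_plus = sigma(-1.0), sigma(+1.0)
+
+ # Even states: r = floor(x/pi + 1/2) labels the interval I_r containing x
+ N_even = int(np.floor(sig_plus / np.pi + 0.5) - np.floor(sig_minus / np.pi + 0.5) + 1)
+
+ # Odd states: the analogous count over the intervals (n*pi, (n+1)*pi), as in the appendix
+ N_odd = int(np.floor(sig_plus / np.pi) - np.floor(sig_minus / np.pi) + 1)
+
+ print(N_even, N_odd)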
1291
+ 17
1292
+
1293
+ References
1294
+ [1] T.O. Wehling, A.M. Black-Shaffer, and A.V. Balatsky, “Dirac materials,” Adv. Phys. 63, 1
1295
+ (2014).
1296
+ [2] H. Aoki, M.S. Dresselhaus (eds.), Physics of Graphene (Springer International Publishing,
1297
+ Switzerland, 2014).
1298
+ [3] M.I. Katsnelson, K.S. Novoselov, and A.K. Geim, “Chiral tunnelling and the Klein paradox
1299
+ in graphene,” Nat. Phys. 2, 620–625 (2006).
1300
+ [4] P.E. Allain and J.N. Fuchs, “Klein tunneling in graphene: optics with massless electrons,”
1301
+ Eur. Phys. J. B 83, 301 (2011).
1302
+ [5] M. Yang et al., “Lorentz-violating type-II Dirac fermions in transition metal dichalcogenide
1303
+ PtTe2,” Nat. Commun. 8, 257 (2017).
1304
+ [6] H. Zhang, Y. Xie , C. Zhong, Z. Zhang, and Y. Chen, “Tunable type-I and type-II Dirac
1305
+ Fermions in graphene with nitrogen line defects,” J. Phys. Chem. C 121, 12476 (2017).
1306
+ [7] E. McCann, V.I. Falko, “Landau-level degeneracy and quantum hall effect in a graphite
1307
+ bilayer,” Phys. Rev. Lett. 96, 086805 (2006).
1308
+ [8] F.D.M. Haldane, “Model for a Quantum Hall Effect without Landau Levels: Condensed-
1309
+ Matter Realization of the ‘Parity Anomaly’,” Phys. Rev. Lett. 61, 2015 (1988).
1310
+ [9] T. Tummuru, S. Plugge, and M. Franz, “Josephson effects in twisted cuprate bilayers,”
1311
+ Phys. Rev. B 105, 064501 (2022).
1312
+ [10] M. Mekata, “Kagome: The Story of the Basketweave Lattice,” Phys. Today 56, 12 (2003)
1313
+ [11] E. Illes, “Properties of the α − T3 model,” Ph.D. thesis.
1314
+ [12] B. Dey and T.K. Ghosh, “Floquet topological phase transition in the α−T3 lattice,” Phys.
1315
+ Rev. B 99, 205429 (2019).
1316
+ [13] V. Jakubský and K. Zelaya, “Landau levels and snake states of pseudo-spin-1 Dirac-like
+ electrons in gapped Lieb lattices,” J. Phys.: Condens. Matter 51, 025302 (2023).
1318
+ [14] N. Goldman, D.F. Urban, and D. Bercioux, “Topological phases for fermionic cold atoms
1319
+ on the Lieb lattice,” Phys. Rev. A 83, 063601 (2011).
1320
+ [15] W. Jiang et al., “Topological band evolution between Lieb and kagome lattices,” Physical
1321
+ Review B, 99(12) (2019). doi:10.1103/physrevb.99.125131
1322
+ [16] R. Fan, L. Sun, X. Shao, Y. Li, and M. Zhao, “Two-dimensional Dirac materials:
1323
+ tight-binding lattice models and material candidates,” ChemPhysMater (2022) accepted
1324
+ (https://doi.org/10.1016/j.chphma.2022.04.009).
1325
+ [17] L. Yan and P. Liljeroth, “Engineered electronic states in atomically precise artificial lattices
1326
+ and graphene nanoribbons,” Advances in Physics: X 4, 1651672 (2019).
1327
+ 18
1328
+
1329
+ [18] D. Leykam, A. Andreanov and S. Flach, “Artificial flat band systems: from lattice models
1330
+ to experiments,” Advances in Physics: X 3, 1473052 (2018).
1331
+ [19] D. Guzmán-Silva et al., “Experimental observation of bulk and edge transport in photonic
1332
+ Lieb lattices,” New J. Phys. 16, 063061 (2014).
1333
+ [20] R. A. Vicencio et al., “Observation of Localized States in Lieb Photonic Lattices,” Phys.
1334
+ Rev. Lett. 114, 245503 (2015).
1335
+ [21] F. Diebel et al., “Conical Diffraction and Composite Lieb Bosons in Photonic Lattices,”
1336
+ Phys. Rev. Lett. 116, 183902 (2016).
1337
+ [22] S. Mukherjee, A. Spracklen, D. Choudhury, N. Goldman, P. Öhberg, E. Andersson, R.R.
1338
+ Thomson, “Observation of a Localized Flat-Band State in a Photonic Lieb Lattice,” Phys.
1339
+ Rev. Lett. 114 (24), 245504 (2015).
1340
+ [23] R. A. Vicencio, C. Cantillano, L. Morales-Inostroza, B. Real, C. Mejía-Cortés, S. Weimann,
1341
+ A. Szameit, and M.I. Molina, “Observation of Localized States in Lieb Photonic Lattices,”
1342
+ Phys. Rev. Lett. 114, 245503 (2015).
1343
+ [24] R. Shen, L.B. Shao, B. Wang, and D.Y. Xing, “Single Dirac cone with a flat band touching
1344
+ on line-centered-square optical lattices,” Phys. Rev. B 81, 041410(R) (2010).
1345
+ [25] M.R. Slot et al., “Experimental realization and characterization of an electronic Lieb lat-
1346
+ tice,” Nat. Phys. 13, 672 (2017).
1347
+ [26] B. Cui, X. Zheng, J. Wang, D. Liu, S. Xie, B. Huang, “Realization of Lieb lattice in covalent-
1348
+ organic frameworks with tunable topology and magnetism,” Nature Communications 11,
1349
+ 66 (2020).
1350
+ [27] D. Green, L. Santos, and C. Chamon, “Isolated Flat Bands and Spin-1 Conical Bands in
1351
+ Two-Dimensional Lattices,” arXiv:1004.0708
1352
+ [28] Y. Klymenko, L. Malysheva, and A. Onipko, “Electron transmission through step- and
1353
+ barrier-like potentials in graphene ribbons,” Phys. Status Solidi B 245, 2181-2184 (2008).
1354
+ [29] L. Wei-Tao, L. Wen, and Y. Cheng-Zhi, “Enlarged band gap and electron switch in
1355
+ graphene-based step-barrier structure,” Appl. Phys. Lett. 103, 192102 (2013).
1356
+ [30] J.H. Bardarson, M. Titov, and P.W. Brouwer, “Electrostatic Confinement of Electrons in
1357
+ an Integrable Graphene Quantum Dot,” Phys. Rev. Lett. 102, 226803 (2009).
1358
+ [31] Y. Long and J. Ren, “Topological Landau-Zener Bloch Oscillations in Photonic Floquet
1359
+ Lieb Lattices,” arXiv:1706.01107 (2017).
1360
+ [32] S.A. Owerre, “Photoinduced Topological Phase Transitions in Topological Magnon Insula-
1361
+ tors,” Sci Rep 8, 4431 (2018).
1362
+ [33] C.-Z. Chang et al., “Experimental Observation of the Quantum Anomalous Hall Effect in
1363
+ a Magnetic Topological Insulator,” Science 340, 167 (2013).
1364
+ [34] S. Xing et al., “Theory, properties and engineering of 2D magnetic materials” Progress in
1365
+ Material Science 132, 101036 (2023).
1366
+ 19
1367
+
1368
+ [35] R. Peierls, “Zur Theorie der galvanomagnetischen Effekte,” Zeits. für Physik 53, 255 (1929).
+ [36] F. Bloch, “Über die Quantenmechanik der Elektronen in Kristallgittern,” Zeits. für Physik
1370
+ 52, 555 (1928).
1371
+ [37] H. Xu and Y.-C. Lai, “Superscattering of a pseudospin-1 wave in a photonic lattice,” Phys.
1372
+ Rev. A 95, 012119 (2017).
1373
+ [38] H. Aoki, H. Ando, and H. Hatsumura, “Hofstadter butterflies for flat bands,” Phys. Rev.
1374
+ B. 54, 17296 (1996).
1375
+ [39] D.L. Bergman, C. Wu, and L. Balents, “Band touching from real-space topology in frus-
1376
+ trated hopping models,” Phys. Rev. B 78, 125104 (2008).
1377
+ [40] J.-W. Rhim and B.-Jung Yang, “Singular flat bands,” Adv. Phys. X 6, 1901606 (2021).
1378
+ [41] T. K. Ghosh, A. De Martino, W. Häusler, L. Dell’Anna, and R. Egger, “Conductance
1379
+ quantization and snake states in graphene magnetic waveguides,” Phys. Rev. B 77, 081404
1380
+ (2008).
1381
+ [42] W. Ashcroft and N.D. Mermin, Solid State Physics (Saunders College, Philadelphia, 1976).
1382
+ [43] V. Jakubský, L. M. Nieto and M. S. Plyushchay, “Klein tunneling in carbon nanostructures:
+ A free particle dynamics in disguise,” Phys. Rev. D 83, 047702 (2011).
+ [44] D.F. Urban, D. Bercioux, M. Wimmer, and W. Häusler, “Barrier transmission of Dirac-like
+ pseudospin-one particles,” Phys. Rev. B 84, 115136 (2011).
1386
+ [45] N. Dombey and A. Calogeracos, “Seventy years of the Klein paradox,” Phys. Rep. 315, 41
1387
+ (1999).
1388
+ [46] Y. Betancur-Ocampo, G. Cordourier-Maruri, V. Gupta, and R. de Coss, “Super-Klein
1389
+ tunneling of massive pseudospin-one particles,” Phys. Rev. B 96, 023404 (2017).
1390
+ [47] A. Contreras-Astorga, F. Correa, and V. Jakubský, “Super-Klein tunneling of Dirac fermions
+ through electrostatic gratings in graphene,” Phys. Rev. B 102, 115429 (2020).
+ [48] V. Jakubský and M. Tušek, “Dispersionless wave packets in Dirac materials,” Annals of
1393
+ Physics 378, 171 (2017).
1394
+ 20
1395
+
AdE1T4oBgHgl3EQf9Aai/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
BtFKT4oBgHgl3EQfXS78/content/2301.11794v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:25a98f12b951e4eb1ba486615982763c875f354a03b252ae1a128126503ba235
3
+ size 2910540
F9E5T4oBgHgl3EQfVg_E/content/tmp_files/2301.05552v1.pdf.txt ADDED
@@ -0,0 +1,1587 @@
1
+ Application of the partial Dirichlet-Neumann contact algorithm to simulate
2
+ low-velocity impact events on composite structures
3
+ G. Guillameta,∗, A. Quintanas-Corominasa,b,c, M. Riveroa, G. Houzeauxa, M. Vázqueza, A. Turonb
+ aBarcelona Supercomputing Center (BSC), Plaça Eusebi Güell, 1-3, Barcelona, 08034, Catalonia, Spain
5
+ bAMADE, Universitat de Girona, Av. Universitat de Girona 4, Girona, 17003, Catalonia, Spain
6
+ cDepartment of Civil and Environmental Engineering, Imperial College London, London, SW7 2AZ, UK
7
+ Abstract
8
+ Impact simulations for damage resistance analysis are computationally intensive due to contact algo-
9
+ rithms and advanced damage models. Both methods, which are the main ingredients in an impact event, re-
10
+ quire refined meshes at the contact zone to obtain accurate predictions of the contact force and damage onset
11
+ and propagation through the material. This work presents the application of the partial Dirichlet-Neumann
12
+ contact algorithm to simulate low-velocity impact problems on composite structures using High-Performance
13
+ Computing. This algorithm is devised for parallel finite element codes running on supercomputers, and it
14
+ is extended to explicit time integration schemes to solve impact problems including damage. The proposed
15
+ framework is validated with a standard test for damage resistance on fiber-reinforced polymer matrix com-
16
+ posites. Moreover, the parallel performance of the proposed algorithm has been evaluated in a mesh of 74M
17
+ of elements running with 2400 processors.
18
+ Keywords:
19
+ Contact mechanics, Damage modeling, Finite element analysis, High-Performance Computing
20
+ 1. Introduction
21
+ Impacts by foreign objects against any part of the aircraft are a major concern for the aerospace industry
22
+ because they may compromise the structural integrity of the aircraft. Impact events can be classified into
23
+ three main categories: low, high (including ballistics), and hyper-high velocity impacts [1].
24
+ During the
25
+ impact, the energy of the foreign object (projectile) is transferred to the target (structure), and consequently,
26
+ the material can be damaged. Concretely, low-velocity impact events on composite materials (e.g., tool drops
27
+ during maintenance or manufacturing) can drastically reduce the residual strength of the part even for case
28
+ scenarios of Barely Visible Impact Damage (BVID). Therefore, designing composite structures with damage
29
+ resistance cannot be avoided.
30
+ Moreover, extensive experimental campaigns particularly focused on the
31
+ ∗Corresponding author
32
+ Email address: gerard.guillamet@bsc.es (G. Guillamet)
33
+ Preprint submitted to Composites Part A: Applied Science and Manufacturing
34
+ January 16, 2023
35
+ arXiv:2301.05552v1 [cs.CE] 11 Jan 2023
36
+
37
+ investigation and evaluation of the damage resistance of a specific material may be cost-prohibitive
+ for the industry.
39
+ Thus, virtual testing of impact events is of great interest as mathematical models and technology advance.
40
+ However, solving the physics behind this problem, particularly from the material point of view, is still one
41
+ of the most complex and challenging problems today. A review of existing software for composite impact
42
+ modeling focused on low-velocity events is conducted by Nguyen et al. [38]. In this review, the constitutive
43
+ damage models play an essential role apart from the methods such as the contact algorithm or temporal
44
+ integration scheme. Most of them can capture the trends and peak forces reasonably well. The research
45
+ community has put and continues to put a lot of effort into developing reliable constitutive damage models
46
+ for composites. Remarkable progress has been made on specific methodologies and constitutive damage
47
+ models for predicting the damage resistance and damage tolerance of composite structures [28, 10, 17, 53,
48
+ 29, 52, 51, 15]. However, in terms of computational performance, the resolution of an impact problem,
49
+ including different sources of damage, is still computationally demanding. The use of sophisticated contact
50
+ algorithms and advanced damage models require refined element meshes to accurately predict the onset and
51
+ propagation of the damage in such materials.
52
+ The most commonly used contact algorithms for the resolution of an impact problem are the Penalty
53
+ methods [21], Classical Lagrange multipliers [4, 16] or the Augmented Lagrange multipliers. The latter is
54
+ often chosen to solve the contact inequality constraints, see [58]. However, the parallel aspects of these
55
+ traditional contact algorithms are not trivial, and to the authors’ knowledge, little effort has been invested
56
+ in the parallel aspects of such algorithms and their scalability in supercomputers. Some research works
57
+ dealing with the parallel aspects of contact algorithms are [34, 35, 21].
58
+ A completely different approach to the previous algorithms is the method of partial Dirichlet-Neumann
59
+ (PDN) conditions. The contact is tackled as a coupled problem, in which the contacting bodies are treated
60
+ separately, in a staggered way. The coupling is performed through the exchange of boundary conditions at the
61
+ contact interface following a Gauss-Seidel strategy. The pioneering works using this approach are conducted
62
+ by Krause and Wohlmuth [24] and Yastrebov [59], showing the capabilities of solving nonlinear contact
63
+ problems. To the authors’ knowledge, one of the first applications of this method for explicit dynamics is
64
+ made by Lapeer et al. [25], where the PDN method was used to simulate natural childbirth using explicit
65
+ dynamics and executed in a hybrid system with Central Processing Units (CPU) and Graphics Processing
66
+ Unit (GPU) architectures. However, little attention is dedicated to the computational performance and
67
+ the parallel aspects of dealing with large-scale models. More recently, this method has been adapted and
68
+ implemented in parallel in the Alya multiphysics code [57] by Rivero [46] and published by the authors in
69
+ [19]. The mathematical and the parallel aspects are described in detail in these works, demonstrating the
70
+ benefits of the PDN contact algorithm in High-Performance Computing (HPC) systems.
71
+ In this paper, we present the application of the aforementioned method proposed by the authors in [46, 19]
72
+ 2
73
+
74
+ for the resolution of low-velocity impact problems for composite materials. Existing time integration schemes
75
+ and constitutive models from the literature have also been adapted and implemented within a parallel
76
+ framework. So the main contribution of the present paper is focused on the extension of the PDN contact
77
+ algorithm for explicit time integration schemes and its use in HPC systems involving impact events in the
78
+ field of composite materials. Additionally, a new mesh multiplication algorithm is presented to deal with
79
+ cohesive elements and element technologies such as continuum shell elements.
80
+ The content of this paper is structured as follows. Firstly, the methods for the resolution of low-velocity
81
+ impact events on composite materials including damage are explained with a strong emphasis on the contact
82
+ algorithm and its implementation in parallel codes based on the finite element method. Then, the algorithm
83
+ is validated through three benchmark tests.
84
+ The first one consists of a quasi-static indentation test to
85
+ verify that the contact pressure is well captured by using implicit and explicit time integration schemes.
86
+ The second and the third examples correspond to a low-velocity impact on a composite plate following the
87
+ ASTM International standard to measure the damage resistance of fiber-reinforced polymers. These last
88
+ examples use different material systems that are widely used in the aerospace industry, the T800S/M21 and
89
+ the AS4/8552 carbon/epoxy systems. The numerical predictions obtained are correlated with experiments,
90
+ and the computational performance is analyzed and discussed for the coupon made of AS4/8552 material.
91
+ Finally, the conclusions of this work are commented on together with future work to improve the simulation
92
+ of impact events including damage.
93
+ 2. Modeling framework for low velocity impact events using High-performance systems
94
+ This section describes the modeling framework and the application of the partial Dirichlet-Neumann
95
+ (PDN) contact algorithm for the simulation of impact events. Particular emphasis is put on the extension of
96
+ such contact algorithm for explicit dynamic analysis. All the methods presented here are implemented in the
97
+ Alya multiphysics code [57] based on the Finite Element Method (FEM). This parallel code is based on high-
98
+ performance programming techniques for distributed and shared memory supercomputers. Moreover, the
99
+ methods are programmed using the total Lagrangian formulation, where stresses and strains are measured
+ with respect to the original configuration. The Green strain measure and the 2nd Piola-Kirchhoff stress are
+ used, and we follow the notation from Belytschko et al. [6] throughout the paper. As an impact event is
+ a complex and computationally demanding engineering problem, it is very attractive to solve it using High-
103
+ Performance Computing. It is worth highlighting that all the methods described here can be implemented
104
+ in other parallel FEM codes.
105
+ 2.1. Partial Dirichlet-Neumann contact algorithm
106
+ The low-velocity impact event proposed in this paper can be assumed as a non-linear contact problem,
107
+ where the striker is considered as a rigid body and the plate as a deformable body. Let’s assume that both
108
+ 3
109
+
110
+ body instances are of arbitrary shape, and we do not consider friction. Therefore, this contact problem can
111
+ be written as a boundary value problem, see Yastrebov [60], which includes the Hertz-Signorini-Moreau law
112
+ for normal contact. So the balance of momentum and the contact conditions can be written as follows:
113
+ ∇ · σ + f_v = 0                                   in Ω
+ σ · n = σ_0                                       on Γ_N
+ u = u_0                                           on Γ_D
+ g ≥ 0,  σ_n ≤ 0,  σ_n g = 0,  σ_t = 0             on Γ_C          (1)
122
+ where σ is the Cauchy stress tensor, f_v a vector of volumetric forces, σ_0 a set of prescribed tractions on the
+ Neumann boundary Γ_N, and u_0 a set of prescribed displacements on the Dirichlet boundary Γ_D. Over the
+ contact boundary Γ_C, we have imposed the following conditions: g represents the gap between the contacting
125
+ bodies, σn is the normal contact pressure, and σt is the tangential stress. The tangential stress equal to
126
+ zero (σt = 0) in Eq. (1) characterizes a frictionless contact case.
127
+ In order to satisfy the conditions in Eq. (1), the present paper uses the partial Dirichlet-Neumann
128
+ contact algorithm proposed by Rivero [46, 19] which is based on the work from Yastrebov [60]. In the works
129
+ mentioned above, the method was applied for implicit time integration schemes, while in the present work,
130
+ the algorithm is extended to explicit schemes. The main benefits of the PDN contact algorithm over typical
+ Penalty or Lagrange multiplier methods are the following: (i) the size of the problem does not increase, since
+ no Lagrange multipliers are added as unknowns; (ii) there is no restriction on the mesh partitioner, since no
+ contact elements are used; (iii) no contact tangent matrices (implicit schemes) or residual
+ contact force vectors are required; and (iv) it is easy to parallelize, as the contact can be treated as a solid-to-solid coupling using
+ existing methods for multiphysics applications such as the Gauss-Seidel scheme.
136
+ The iterative process of the PDN contact algorithm in a frictionless problem is shown in Fig. 1. Let’s
137
+ assume that the time of the simulation is 0 < t < tE and it is subdivided into nT S time steps ranging
138
+ from n = 1...nT S and tE is the time at the end of the simulation. At time step n there is no interaction
139
+ between both code instances, so no contact is detected (Fig. 1a). Then, at time tn+1, contact is detected
140
+ as we have overlapping between both bodies. At this step, the non-penetration boundary conditions are
141
+ treated kinematically, i.e., as Multiple Point Constraints (MPC), by projecting the nodes belonging
+ to the slave surface (deformable body) onto the master surface (rigid body) using a Dirichlet condition.
+ Then, a local coordinate system with normal-tangent basis vectors nj and tj is created for each detected
+ node j. The contact node is restricted to move only along the tangent line defined by the vector tj, see Fig.
145
+ 1e. In a hypothetical frictional contact problem, the friction force would be imposed in this direction as a
146
+ Neumann boundary condition. In this work, friction is not considered for the low-velocity impact as the
147
+ relative velocities at the contact zone are sufficiently small. After that, the contact algorithm checks the
148
+ 4
149
+
150
172
+ Figure 1: Iterative process of the parallel PDN contact algorithm. (a) Interaction; (b) Overlapping; (c) Dirichlet boundary
173
+ conditions (projections) (d) Released nodes and equilibrium. (e) Kinematic constraint for node 1 and 2. The reader is referred
174
+ to the web version of this paper for the color representation of this figure.
175
+ presence of adhesion or artificial contact nodes, i.e., nodes in traction (Fig. 1c). The reaction contact force
+ f^c_j has to satisfy the following condition:
+ f^c_j · n_j ≥ 0          (2)
181
+ Those adhesion nodes have to be released, so the current time step tn+1 has to be repeated; the i index
182
+ shown in Fig. 1c and 1d represents the sub-iterations for node release. The whole kinematic constraint
183
+ process is depicted for two of the contacting nodes in Fig. 1e. The nodes release algorithm for explicit time
+ schemes is described in Algo. 2 in Appendix A. The criterion to distinguish a true contact node from an
+ adhesion (artificial) contact node is the contact force (the reaction due to the Dirichlet condition).
187
+ The vector of contact forces using total Lagrangian formulation can be expressed as:
188
+ (f^c_j)^T = ∫_{Ω_0} B^T_{0j} P dΩ_0          (3)
+ where B^T_{0j} is the matrix containing the derivatives of the shape functions with respect to the reference
+ system and P is the nominal stress tensor, see [6].
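+ For illustration, a minimal Python sketch of the node-release test based on condition (2); the data layout and function name are assumptions for this sketch, not Alya's actual interface:
+ import numpy as np
+
+ def release_adhesion_nodes(contact_nodes, f_c, normals):
+     """Split projected nodes into true contact nodes and adhesion nodes to release.
+     f_c[j] and normals[j] are the nodal reaction force and outward normal of node j."""
+     keep, release = [], []
+     for j in contact_nodes:
+         if np.dot(f_c[j], normals[j]) >= 0.0:   # compressive reaction: true contact, Eq. (2)
+             keep.append(j)
+         else:                                   # node in traction: artificial adhesion
+             release.append(j)
+     return keep, release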
198
+ An attractive aspect of the PDN method is that the computational cost of the projections is very small
+ compared to Penalty or Lagrange approaches [25]. One of the most time-consuming parts, and a vital issue for
+ further research, is the contact searching and communication between the subdomains (belonging to different
201
+ code instances), as stated in the previous work from the authors [19]. In our case, we use the PLE++ library
202
+ [61], which is an adaptation of the Parallel Location and Exchange PLE library [14]. The main algorithm
203
+ of the PDN contact method is described in Algo. 1 in Appendix
204
+ A and the nodes release algorithm for
205
+ 5
206
+
207
+ explicit schemes is summarized in Algo. 2. The reader is referred to Rivero [46] and Guillamet et al. [19]
208
+ works for more details on the implementation aspects.
209
+ 2.2. Time integration schemes
210
+ 2.2.1. Deformable body
211
+ Spurious oscillations may appear when using explicit time schemes for dynamic and wave propagation
212
+ problems such as impact events. These oscillations occur due to the mismatch of two different types of wave
213
+ components. Thus, dissipative explicit time schemes are often used to reduce the numerical instabilities
214
+ induced by the spatial and time discretization procedures. Among the many dissipative methods available,
215
+ the Tchamwa–Wielgosz (TW) explicit scheme [31] is beneficial because it damps out the spurious oscillations
216
+ occurring in the highest frequency domain. This is the time integration scheme selected in this work, but
217
+ any other explicit time scheme such as the Central Difference (CD) [6] including bulk viscosity could also
218
+ be used.
219
+ The motion described by the TW scheme is the following:
220
+ ḋ_{n+1} = ḋ_n + ∆t d̈_n                                   (4)
+ d_{n+1} = d_n + ∆t ḋ_n + ϕ(∆t)² d̈_n                       (5)
229
+ where d, ˙d, ¨d are the displacement, velocity and acceleration nodal vectors, respectively; ∆t is the time
230
+ increment or step size; and ϕ is a numerical viscous parameter, which in the current work is set to 1.033
231
+ [31]. The key to the computational efficiency of explicit time integration schemes is the use of the lumped
232
+ mass matrix for the resolution of the linear system of equations, which is simplified as an easy inversion of
233
+ the diagonal mass matrix [6]. The global stiffness matrix is not required to be assembled as it is needed for
234
+ implicit time integration schemes. The explicit time integration scheme solves accelerations, so their values
235
+ at the beginning of the increment are computed by making use of the equation of motion of the system:
236
+ m d̈_n = f_n(d_n, t_n) = f_e(d_n, t_n) − f_i(d_n, t_n) − f_c(d_n, t_n)          (6)
239
+ where m is the vector representation of the lumped mass matrix, f_e is the global vector of external forces, f_i
+ is the global vector of internal forces, and f_c is the global vector of contact forces arising from the Dirichlet
+ conditions imposed on the contact nodes. Thanks to m, the acceleration can be computed without invoking any
+ solver as:
+ d̈_n = m⁻¹(f_e − f_i − f_c)                                  (7)
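+ For illustration, a minimal sketch of one explicit TW update following Eqs. (4)-(7), assuming flat nodal NumPy arrays; since the lumped mass is stored as a vector, the inversion in Eq. (7) reduces to an elementwise division:
+ import numpy as np
+
+ def tw_step(d, v, f_ext, f_int, f_con, m_lumped, dt, phi=1.033):
+     """One Tchamwa-Wielgosz step. d, v are nodal displacement/velocity vectors;
+     m_lumped is the lumped mass vector; phi is the numerical viscous parameter."""
+     a = (f_ext - f_int - f_con) / m_lumped        # Eq. (7): no linear solver needed
+     v_next = v + dt * a                           # Eq. (4)
+     d_next = d + dt * v + phi * dt**2 * a         # Eq. (5): uses the old velocity
+     return d_next, v_next, a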
246
+ 6
247
+
248
+ 2.2.2. Rigid body
249
+ The striker in the present work is considered as a rigid body. For the resolution of the equations of motion
+ of the rigid body, we use a 4th-order Runge-Kutta scheme. Let's consider the following differential equation,
+ where the right-hand side is a function of both time and another function dependent on time:
252
+ dy/dt = f(t, y(t))                                          (8)
255
+ From this equation, the Runge-Kutta method estimates the solution at n + 1 taking into account four
+ evaluations of the right-hand side per step dt as follows,
+ k_1 = dt · f(t, y(t))
+ k_2 = dt · f(t + dt/2, y(t) + k_1/2)
+ k_3 = dt · f(t + dt/2, y(t) + k_2/2)
+ k_4 = dt · f(t + dt, y(t) + k_3)
+ y_{n+1} = y(t + dt) = y(t) + k_1/6 + k_2/3 + k_3/3 + k_4/6                       (9)
271
+ In the present paper, the motion of the striker is solved by making use of the following differential
272
+ equation:
273
+ m · ¨d = m · g − f e
274
+ (10)
275
+ where m is a scalar value of the mass of the rigid body, g is the gravity force vector at the center of mass,
276
+ ¨d is the linear acceleration, and f e is the external force also at the center of mass from the rigid body. It is
277
+ worth mentioning that the rigid body is represented by a point (its center of mass), so the above vectors have
+ a dimension of 2 for 2-d problems and 3 for 3-d problems. When contact occurs, the external force on the
+ rigid body is calculated by f_e = Σ_{j=1}^{n_c} f^c_j, where j denotes a contact node and n_c is the total number of contact nodes
+ belonging to the deformable body.
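+ A minimal sketch of one RK4 update of the striker state following Eqs. (8)-(10); the contact_force callback stands in for the summed nodal contact reactions f_e, and all names are illustrative assumptions rather than Alya's interface:
+ import numpy as np
+
+ def striker_rk4_step(position, velocity, t, dt, mass, gravity, contact_force):
+     """One 4th-order Runge-Kutta step for the rigid striker, Eq. (10): m·a = m·g − f_e.
+     gravity is the gravity acceleration vector; contact_force(t, pos) returns f_e."""
+     def rhs(t, s):
+         pos, vel = s[:3], s[3:]
+         acc = gravity - contact_force(t, pos) / mass
+         return np.concatenate([vel, acc])
+
+     s = np.concatenate([position, velocity])
+     k1 = dt * rhs(t, s)
+     k2 = dt * rhs(t + dt / 2, s + k1 / 2)
+     k3 = dt * rhs(t + dt / 2, s + k2 / 2)
+     k4 = dt * rhs(t + dt, s + k3)
+     s_new = s + k1 / 6 + k2 / 3 + k3 / 3 + k4 / 6
+     return s_new[:3], s_new[3:]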
283
+ 2.3. Mesoscale damage modeling for fiber-reinforced composites
284
+ The mesoscopic length scale is the most suitable for virtual testing of low-velocity impacts on structures
285
+ made of composite materials. At this scale, the numerical predictions have a good trade-off between infor-
286
+ mation about the damage mechanisms driving the failure process and the structural response without the
287
+ complexity of dealing with intricate microstructures. It is worth emphasizing that the mesoscopic length
288
+ scale is not only appropriate for the bottom levels of the building block approach (coupon and elements)
289
+ [28, 53, 15, 50] but also for the top levels (sub-components and components) [43, 44, 13].
290
+ 7
291
+
292
+ From the constitutive modelling viewpoint, the mesoscopic length scale simplifies the intricate microstruc-
293
+ ture of long fibre composite laminates by homogenising the properties and mechanisms at the lamina level.
294
+ The outcome is a layered material with two well-defined regions: intralaminar and interlaminar. The former
295
+ is modelled as a transversally isotropic material, which can fail due to fibre breaking and matrix cracking
296
+ according to the loading scenario. The latter is modelled as a very thin region, usually tending to zero
297
+ thickness, where delamination can onset and propagate.
298
+ Regarding the modelling architecture, several strategies exist in the literature suitable for modelling
299
+ composite at a mesoscopic length scale using FEM [37]. We adopt a continuous approach for the intralam-
300
+ inar region (CDM with linear elements) and a discontinuous one for the interlaminar (CZM with interface
301
+ elements). The straightforward implementation of this strategy in a standard FEM code aids in preserv-
302
+ ing the scalability of Alya multiphysics [41]. Thus, the mesoscale damage modeling strategy exploits the
303
+ computational resources to maximize the accuracy of the impacts thanks to very thin meshes.
304
+ 2.3.1. Intralaminar damage model
305
+ The intralaminar damage model for predicting ply failure is based on the continuum damage mechanics
306
+ framework.
307
+ Fiber and matrix cracks are smeared in the continuum and represented by state variables.
308
+ Accordingly, the crack’s kinematics is not explicitly represented, but their effects on the degradation of the
309
+ capacities of sustaining loads. In turn, the onset and growth of the damage failure mechanisms are governed
310
+ by the failure surfaces and evolution laws. In this work, we employ a local damage model based on the
311
+ constitutive modeling framework for long fiber composite materials proposed by Maim´ı et al. [32, 33]. This
312
+ framework has been used widely in the literature, demonstrating outstanding accuracy and performance not
313
+ only for static scenarios [11, 7, 41] but also for impact [17, 51, 48] and fatigue [26, 27].
314
+ The main ingredients of the intralaminar damage model are: i) transversally isotropic elasto-plastic re-
315
+ sponse, ii) damage activation functions related to the different ply failure mechanisms through the maximum
316
+ strain criterion for the fiber breaking and the LaRC criteria for the matrix cracking, iii) the damage evolution
317
+ laws are defined to dissipate the fracture energy associated to the opening mode ensuring mesh objectivity
318
+ by the crack-band theory [5], and iv) the thermodynamic consistency is ensured by imposing irreversibility
319
+ of the damage variables.
320
+ Fig. 2 illustrates the intralaminar failure modes schematically modelled, while Algo. 4 in Appendix A
321
+ summarises the material model workflow. Note that a plastic response under shear loads is considered,
322
+ and five damage mechanisms are modelled: fibre breaking, fibre kinking, tensile and compressive matrix
323
+ cracking, and shear matrix cracking. The details of the expressions employed and their justification from a
324
+ physical standpoint can be found in [32, 33, 51].
325
+ Besides the constitutive response, the intralaminar damage model also encloses the computation of the
326
+ critical time step, which is required by the explicit time integration scheme. For the sake of simplicity, we
327
+ 8
328
+
329
+ Fibre breaking
330
+ Fibre kinking
331
+ Matrix cracking
332
+ Matrix cracking
333
+ Matrix cracking
334
+ Figure 2: Schematic representation of the intralaminar damage mechanisms. Adapted from [26].
335
+ utilise the same formula of a transversally isotropic material:
336
+ ∆t = ℓ_c / v_sound,   with   v_sound = √(max C_ij / ρ)                           (11)
+ where C_ij are the components of the effective stiffness matrix, ρ is the density of the material, and ℓ_c is
+ the characteristic element length. Considering the structured hexahedral mesh employed, we approximate the
+ characteristic element length with the element volume V_e [33]:
+ ℓ_c ≈ ∛V_e                                                                        (12)
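+ As a hedged illustration, the critical time step of Eqs. (11)-(12) can be evaluated as follows (a 6×6 Voigt-notation stiffness matrix is assumed for the input):
+ import numpy as np
+
+ def intralaminar_time_step(C, rho, elem_volume):
+     """Critical time step for one hexahedral element, Eqs. (11)-(12):
+     characteristic length from the element volume, wave speed from the stiffest
+     entry of the effective stiffness matrix C."""
+     v_sound = np.sqrt(np.max(C) / rho)
+     l_c = elem_volume ** (1.0 / 3.0)
+     return l_c / v_sound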
350
+ 2.3.2. Interlaminar damage model
351
+ The interlaminar damage model for predicting the onset and propagation of delamination is based on
352
+ the cohesive zone approach and formulated in the context of damage mechanics. Accordingly, a damage
353
+ state variable is employed to account for the gradual loss of the bearing capacities of the material in the
354
+ cohesive zone due to the separation of crack surfaces. In turn, the separation or opening of the crack is
355
+ represented by a kinematic quantity noted as displacement jump, which is approximated by employing the
356
+ interface element technology [2, 39, 9]. Thus, the interlaminar damage model is a constitutive model that
357
+ computes the cohesive reactions as a function of displacement jumps. More details of the mesoscale modeling
358
+ of delamination using cohesive zone models can be found in Carreras et al. [12].
359
+ In this work, we use the cohesive zone model proposed by Turon et al. [54, 56], which has been employed
360
+ extensively in the literature [20, 47, 40, 42]. The main characteristics of this CZ model are: i) linear response
361
+ 9
362
+
363
+ before initiation of the softening, ii) linear relation between the cohesive tractions and crack openings,
364
+ iii) onset and propagation of the damage in compliance with the Benzeggagh-Kenane criterion, and iv)
365
+ thermodynamic consistency despite the loading scenario, even when the mix-mode ratio varies. Algo. 5
366
+ summarizes the cohesive zone model workflow.
367
+ Regarding the element technology, we employ zero-thickness interface elements for capturing the delam-
368
+ ination, implemented using the formulation presented in [45]. As standard interface elements are used, the
369
+ integrals are computed using a Newton-Cotes integration scheme to mitigate the spurious oscillations in the
370
+ traction profile along the interface [49]. The stable time increment, which is necessary for the explicit time
371
+ integration scheme is obtained through [51]:
372
+ ∆t_coh = √(ρ̄ / K_coh)                                                            (13)
377
+ where ¯ρ and Kcoh are numerical parameters known as cohesive surface density and penalty stiffness, re-
378
+ spectively. The cohesive surface density for zero-thickness elements is approximated by the expression in
379
+ [52]. In turn, the cohesive penalty stiffness is defined to avoid affecting the compliance of the system as
380
+ Kcoh ≥ 50ET /tlam, where ET is the transverse elastic modulus and tlam the adjacent laminate thickness
381
+ [55].
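+ A short sketch of Eq. (13) together with the penalty-stiffness bound quoted above; these are illustrative helper functions under the stated assumptions, not the damage-model implementation itself:
+ import numpy as np
+
+ def cohesive_penalty_stiffness(E_T, t_lam, alpha=50.0):
+     """Lower bound K_coh >= alpha * E_T / t_lam, with alpha = 50 as suggested in [55]."""
+     return alpha * E_T / t_lam
+
+ def cohesive_time_step(rho_bar, K_coh):
+     """Stable increment of Eq. (13) for zero-thickness interface elements;
+     rho_bar is the cohesive surface density (assumed input, see [52])."""
+     return np.sqrt(rho_bar / K_coh)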
382
+ 2.4. Mesh refinement algorithm for interface elements with a cohesive law
383
+ The mesh multiplication algorithm proposed by Houzeaux et al. [22] has been extended to deal with
384
+ the presence of interface elements or even continuum shell element formulations.
385
+ Focusing on interface
386
+ elements, they are zero-thickness elements with a cohesive material law that are inserted between plies in
387
+ a laminated composite material in order to predict the delamination damage mechanism. It is well known
388
+ that an accurate prediction of the onset and propagation of delamination in composite materials requires
389
+ very refined meshes, as stated in [54, 55]. However, depending on the number of interface layers or the
390
+ geometry size, it can be challenging to place these elements between plies and computationally demanding
391
+ to solve the problem.
392
+ On the other hand, the generation of large meshes, with up to thousands of millions of elements, is often a
+ bottleneck in engineering applications. Thus, integrated tools for mesh refinement within parallel
394
+ codes devised for High-Performance systems allow a parallel and fast refinement of the coarse mesh without
395
+ the need to create the mesh again.
396
+ Therefore, this paper also introduces a new capability of the mesh multiplication algorithm from [22],
397
+ which enables the refinement of large-scale problems, including interface elements. Let’s assume a configu-
398
+ ration of two bulk elements together with an interface element between them, as shown in Fig. 3.
399
+ 10
400
+
401
449
+ Figure 3: Mesh multiplication between bulk and interface (cohesive) elements.
450
+ The 8-node interface element can only be divided into four elements to avoid the duplication of the
451
+ element at the interface mid-plane between the bulk elements. The criterion used for the correct division
452
+ is by making use of the element normal, also known as stacking direction, which is required for the proper
453
+ behaviour of the element due to its kinematics. Thus, those parallel planes to the element normal are used
454
+ to divide the element. The dimensions of the new mesh can be calculated as follows:
455
+ n_e = 8 · n⁰_{e,BULK} − 4 · n⁰_{e,ELINT}
+ n_n = n⁰_n + n_edges + n_faces + n⁰_{e,BULK} − n⁰_{e,ELINT} − n⁰_{edges,ELINT} − n⁰_{faces,ELINT}
+ n_b = 4 · n⁰_b − 2 · n⁰_{b,ELINT}                                                 (14)
468
+ where n_e, n_n and n_b are the total numbers of elements, nodes and boundaries of the new mesh. In order
+ to refine the hybrid mesh, it is important to know the totals n⁰_e, n⁰_n and n⁰_b of the original mesh,
+ and also which edges and faces have to be divided and which do not. Algo. 3 summarizes the
474
+ different steps and functions for the mesh division and reconstruction of the interface domains in a parallel
475
+ framework.
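+ As an illustration of Eq. (14), the dimensions of the refined mesh can be computed as follows (the argument names are assumptions of this sketch):
+ def refined_mesh_dimensions(ne0_bulk, ne0_elint, nn0, nb0, nb0_elint,
+                             n_edges, n_faces, n_edges_elint, n_faces_elint):
+     """New mesh dimensions after one mesh-multiplication level, Eq. (14).
+     The *0 arguments are counts of the original (coarse) mesh."""
+     ne = 8 * ne0_bulk - 4 * ne0_elint
+     nn = (nn0 + n_edges + n_faces + ne0_bulk
+           - ne0_elint - n_edges_elint - n_faces_elint)
+     nb = 4 * nb0 - 2 * nb0_elint
+     return ne, nn, nb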
476
+ 3. Benchmark tests
477
+ Three benchmark tests are conducted to validate the application of the parallel partial Dirichlet-Neumann
478
+ contact algorithm using an explicit time integration scheme. The first example consists of a quasi-static
479
+ indentation test, which has already been solved using an implicit time integration scheme in [19].
480
+ The
481
+ solution using explicit analysis is compared with the numerical solution obtained for implicit analysis. The
482
+ second and the third examples consist of a low-velocity impact event on two coupons manufactured with two
483
+ 11
484
+
485
+ well-known material systems for the damage prediction: T800S/M21 and AS4/8552 respectively. Thanks
486
+ to the proposed algorithm’s flexibility and generality, we use a multi-code approach, where the motion of
487
+ each body (rigid and deformable) is solved using different instances of Alya. Regarding the partitioning
488
+ of the mesh, we use the Space-Filling Curve (SFC) based partitioner described in [8], which performs the
489
+ partitioning in parallel and maximizes the load balance. It is worth highlighting that all the executions
490
+ here are in parallel (pre-process, solution, and post-process steps). In all the examples, the contact bodies
491
+ are discretized with a refined finite element mesh to assess the geometrical localization between both code
492
+ instances and to obtain an accurate prediction of the contact force. All the simulations are conducted in
493
+ MareNostrum4 supercomputer. This cluster has 3456 nodes, each of them with 48 processors Intel Xeon
494
+ Platinum @ 2.1 [GHz], giving a total processor count of 165 888 processors.
495
+ 3.1. Quasi-static indentation test
496
+ This example has already been solved using an implicit time solution scheme in [19, 46]. This case is
497
+ now solved as a quasi-static problem using explicit dynamics. The example consists of a rigid rounded head
498
+ (indenter) and a deformable beam, see Fig. 4. The geometrical dimensions of the indenter (rigid body) are
499
+ ri = 1 m and wi = 0.5 m, while the beam (deformable body) are hb = 0.25 m, lb = 1.5 m and wb = 0.3 m. The
500
+ relative position of the indenter with respect to the beam is given by the parameters ax = 0.25 m, az = 0.1 m
501
+ and ay = 0.01 m (gap). The beam is modelled with an hyperelastic Neo-Hookean formulation [3] and finite
502
+ strains, with material properties Eb = 6.896 × 108 Pa (Young modulus), νb = 0.32 (Poisson ratio) and density
503
+ ρ = 1000 kg m−3. The beam is fully clamped at the bottom face, and a prescribed vertical displacement of
504
+ δ = 0.11 m is applied at the top surface belonging to the indenter. Both bodies are discretized with finite
505
+ elements using full integration: 8-node linear solid elements for the beam and 4-node linear tetrahedrons
506
+ for the indenter. The beam has a base mesh of 3510 elements, while the indenter has 15 960 elements. A
507
+ non-linear dynamic analysis is performed with a total time of the simulation of 0.05 s and a fixed time step of
508
+ 1 × 10−5 s. The selected time step value is smaller than the stable time increment, which is 2.796 × 10−5 s, and
509
+ no mass scaling is used. In order to perform a quasi-static event and minimize the kinetic energy, a smooth
510
+ step function (fifth-order polynomial) is applied. This function has the form A_0 + (A_E − A_0) ξ³(10 − 15ξ + 6ξ²)
+ for t_0 ≤ t < t_E, where A_0 and A_E are the initial and final amplitudes, t_0 and t_E are the initial and final times
+ of the simulation, and ξ = (t − t_0)/(t_E − t_0). This smooth load rate ensures that the first and second time derivatives
+ are zero at the beginning and the end of the transition.
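+ For reference, a minimal sketch of this fifth-order smooth step used to ramp the prescribed displacement (the sign of the applied displacement in the usage comment is an assumption):
+ def smooth_step(t, t0, tE, A0, AE):
+     """A(t) = A0 + (AE - A0) * xi^3 * (10 - 15*xi + 6*xi^2), xi = (t - t0)/(tE - t0).
+     First and second time derivatives vanish at t0 and tE."""
+     xi = min(max((t - t0) / (tE - t0), 0.0), 1.0)
+     return A0 + (AE - A0) * xi**3 * (10.0 - 15.0 * xi + 6.0 * xi**2)
+
+ # e.g. ramping the indenter displacement delta = 0.11 m over 0.05 s:
+ # u_y(t) = smooth_step(t, 0.0, 0.05, 0.0, -0.11)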
516
+ 12
517
+
518
534
+ uy =
535
+ Figure 4: Setup for the quasi-static indentation test. Adapted from [19].
536
+ Displacements and forces obtained at the contact zone are shown in Fig. 5 for two different paths. Line
537
+ path a is centered and goes from one side to the other in the length direction of the beam, while line path b
538
+ is also centered in the width direction. The numerical prediction using the explicit time integration scheme
539
+ is compared with the implicit solution obtained in [19]. We can observe an excellent agreement between
540
+ both numerical predictions in terms of the displacements and contact force.
541
552
+ [18]
553
+ Figure 5: Displacements and contact forces at straight lines path a and b. (a) Tangential displacement in x-direction for path
554
+ a. (b) Normal displacement in y-direction for path a. (c) Tangential displacement in z-direction for path b. (d) Contact force
555
+ at line path a. (e) Contact force at line path b.
556
+ 13
557
+
558
+ 3.2. Low velocity impact on a composite plate
559
+ The proposed benchmark consists of a drop-weight of a rigid hemispherical striker on a rectangular plate
560
+ made of composite material, see Fig. 6. Two impact scenarios using different material systems, layups, and
561
+ impact energies are considered for the validation of the proposed framework. The materials selected are
562
+ the unidirectional prepreg M21/194/34%/T800S (T800S/M21) and the unidirectional prepreg AS4/8552,
563
+ both carbon-epoxy systems. On the one hand, the coupon made of T800S/M21 is manufactured by Hellenic
564
+ Aerospace Industry and tested at Element Materials Technology Seville facilities within the framework of the
565
+ CleanSky2 SHERLOC project. Most of the material properties from the T800S/M21 are also characterized
566
+ by Hellenic Aerospace Industry and Element Materials Technology Seville. On the other hand, the coupon
567
+ made of AS4/8552 is chosen from literature through the works conducted by Gonz´alez et al.
568
+ [17] and
569
+ Soto et al. [51]. All the material properties from the aforementioned materials, including damage model
570
+ parameters, are summarized in Tab. 1. The intralaminar damage model is fed by the in-situ strengths which
571
+ are calculated following the works by Furtado et al. [15] and Soto et al. [51].
572
+ [Figure 6 annotations: rubber clamps (u_x = u_y = u_z = 0); striker r_s = 8 mm, m_s = 5 kg (u_x = u_y = 0); window cut 125 mm × 75 mm (u_z = 0); refined region 75 mm × 75 mm; stacking sequence [45_4/0_4/−45_4/90_4]_s with cohesive elements between ply clusters; t_ply = 0.18125 mm, t_coh = 1.0 × 10⁻⁴ mm.]
593
+ tcoh = 1.0 x 10-4 mm
594
+ Figure 6: Numerical setup for the low velocity impact test. The mesh and layup correspond to the coupon made of AS4/8552
595
+ material used for the parallel performance analysis.
596
+ Both impact case scenarios follow the standard ASTM D7136/D7136M-20 [23] for damage resistance
597
+ evaluation of fiber-reinforced polymers. Each plate has the same dimensions: 150 mm×100 mm and each
598
+ of them are supported on a metallic frame with a cut-out of 125 mm×75 mm.
599
+ Rubber-tipped clamps
600
+ clamp the plate instance at the four corners. We consider equivalent boundary conditions to represent this
601
+ experiment. As we can see in Fig. 6, the metallic frame and the rubber clamps from the experiment do not
602
+ exist as physical entities, so we only consider the contact surface from the rubber cylinder-shaped clamps
603
+ and the contact edges of the cut-out window of the metallic frame, where we apply the boundary conditions.
604
+ 14
605
+
606
+ Property                     | T800S/M21: Value   CV(%)  Ref.  | AS4/8552: Value   Ref.
+ Density (t/mm3)              | 1.59 × 10−9        -            | 1.59 × 10−9       [51, 17]
+ Elastic
+ E11 (MPa)                    | 138.4 × 103        1.95         | 128.0 × 103       [51, 17]
+ E22 = E33 (MPa)              | 8.54 × 103         3            | 7.63 × 103        [51, 17]
+ ν12 = ν13 (-)                | 0.311              16           | 0.35              [51, 17]
+ ν23 (-)                      | 0.45               -            | 0.45              [51, 17]
+ G12 = G13 (MPa)              | 4.29 × 103         3            | 4.358 × 103       [51, 17]
+ G23 (MPa)                    | 2.945 × 103        -            | 2.631 × 103       [51, 17]
+ Strength
+ XT (MPa)                     | 2854.0             4            | 2300.0            [51, 17]
+ XC (MPa)                     | 1109.0             13           | 1531.0            [51, 17]
+ YT (MPa)                     | 56.6               5.8          | 74.2
+ YC (MPa)                     | 250.0                     [15]  | 199.8             [51, 17]
+ SL (MPa)                     | 93.7               0.6          | 94.36a            [36]
+ αo (◦)                       | 53                        [32]  | 53                [32]
+ In-situ strengthsc
+ Y^is_T,int (MPa)             | 132.5 (1tply)      -            | 117.5 (4tply)
+ Y^is_T,int (MPa)             | 93.7 (2tply)       -            | 117.5 (8tply)
+ Y^is_T,out (MPa)             | 83.8 (1tply)       -            | 74.2 (4tply)
+ Y^is_C,int (MPa)             | 250.0 (1tply)      -            | 199.8 (4tply)
+ Y^is_C,int (MPa)             | 250.0 (2tply)      -            | 199.8 (8tply)
+ Y^is_C,out (MPa)             | 250.0 (1tply)      -            | 199.8 (4tply)
+ S^is_L,int (MPa)             | 116.0 (1tply)      -            | 120.8 (4tply)
+ S^is_L,int (MPa)             | 116.0 (2tply)      -            | 120.8 (8tply)
+ S^is_L,out (MPa)             | 93.7 (1tply)       -            | 94.4 (4tply)
+ Fracture toughness
+ GXT (N/mm)                   | 340                       [15]  | 81.5              [51, 17]
+ GXC (N/mm)                   | 60.0                      [15]  | 106.3             [51, 17]
+ GYT (N/mm)                   | GIc                7.3          | GIc               [51, 17]
+ GYC (N/mm)                   | 1.38b              20           | 1.313b            [51, 17]
+ GSL (N/mm)                   | GIIc               20           | GIIc              [51, 17]
+ Traction separation law
+ fXT (-)                      | 0.1                -            | 0.1               [51, 17]
+ fGT (-)                      | 0.6                -            | 0.6               [51, 17]
+ fXC (-)                      | 0.1                -            | 0.1               [51, 17]
+ fGC (-)                      | 0.9                -            | 0.9               [51, 17]
+ Matrix plasticity
+ Sp (N/mm)                    | 66.9               -      [15]  | 62.0a
+ Kp (N/mm)                    | 0.09               -      [15]  | 0.1936a
+ Interface properties
+ GIc (N/mm)                   | 0.308              7.3          | 0.28              [51, 17]
+ GIIc (N/mm)                  | 0.828              20           | 0.79              [51, 17]
+ τI (MPa)                     | 49.2d              5.8          | YT                [51, 17]
+ τII (MPa)                    | 80.7d              0.6          | SL                [51, 17]
+ η (-)                        | 1.75               -            | 1.45              [51, 17]
+ Kcoh (M/mm3)                 | 1.1 × 106          -            | 2.5 × 104         [51]
+ a Best fitted based on properties from [36]
+ b GYC = GSL/cos(αo) [32]
+ c Calculated considering plasticity using equations from [51]
+ d Engineering solution by Turon et al. 2007 [55] using Ne=5
+ Table 1: Material properties for the M21/194/34%/T800S (T800S/M21) and Hexply AS4/8552 including damage models parameters.
821
+ 15
822
+
823
+ The velocity of the striker is given as an initial condition set in the impact direction, while the remaining
824
+ degrees of freedom are constrained. The initial velocity of the striker is calculated based on the impact
825
+ energy of each case study. The initial position of the striker has a gap of 0.01 mm between the striker
826
+ tip and the top surface of the plate in order to avoid overlapping between bodies at the beginning of the
827
+ simulation. Moreover, gravity forces are included in both body instances, considering a gravity value of
828
+ 9.81 m/s2.
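+ As a quick cross-check of the initial velocities quoted in the following subsections (an illustration, not part of the
+ original text; names are illustrative), the striker velocity follows directly from the impact energy, v0 = sqrt(2E/m):
+     import math
+     def impact_velocity(energy_joule, striker_mass_kg):
+         # velocity that carries the prescribed impact energy, v0 = sqrt(2E/m)
+         return math.sqrt(2.0 * energy_joule / striker_mass_kg)
+     print(impact_velocity(10.0, 2.0))   # ~3.16 m/s (T800S/M21 case: 10 J, 2 kg striker)
+     print(impact_velocity(19.3, 5.0))   # ~2.78 m/s (AS4/8552 case: 19.3 J, 5 kg striker)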
829
+ We employ 8-node full integration hexahedron elements for the plate using the inter- and intra-laminar
830
+ damage models described in Sec. 2.3.2 and Sec. 2.3.1 respectively. Cohesive elements are inserted at each
831
+ interface between different ply angles.
832
+ It is worth highlighting that other constitutive material models
+ and element technologies would also be feasible in combination with the proposed contact algorithm. Regarding
+ the strikers used for each impact case scenario, they are discretized with 4-node linear tetrahedron
+ elements with a biased mesh of 0.1 mm at the center of the half-sphere and 1 mm at the end of the edge.
+ The total numbers of elements for the strikers used for the T800S/M21 and AS4/8552 materials are 32 685
+ and 79 934, respectively. Regarding the plates, they both have a refined centered region of 75 mm×75 mm
+ with an in-plane element size equal to, or a multiple of, the ply thickness, depending on the material system, in
+ order to guarantee an aspect ratio close to or equal to 1.
840
+ 3.2.1. Coupon made of T800S/M21 material
841
+ This impact coupon has a stacking sequence of [45/ − 45/02/90/0]S and is made of T800S/M21. The
842
+ nominal ply thickness is 0.192 mm. This case study is submitted to an impact energy of 10 J, which falls into
843
+ the Barely Visible Impact Damage (BVID) analysis. The striker has a diameter of 25 mm and a mass of 2 kg,
844
+ which is modeled as a rigid body. The global element size for the plate is 1 mm, and each lamina and the
845
+ clusters of two plies have one element through the thickness. The in-plane element size is 0.192 mm which
846
+ is equal to the ply thickness resulting in an aspect ratio of 1 for those elements at plies without clustering
847
+ and located at the refined region. The mesh of the plate has a total of 1 042 525 hexahedron elements (≈
848
+ 3.3 million of Degrees Of Freedom (DOF)). The total time for this simulation is set to 5.0 ms. The initial
849
+ velocity of the striker considering the gap previously mentioned is 3.16 m/s.
850
+ The numerical predictions for this impact case scenario and their comparison with experimental data
851
+ are shown in Fig. 7 and summarized in Tab. 2. The experimental test campaign consisted of testing a
852
+ batch of five coupons to ensure proper repeatability of the results. The force-time for each impact was
853
+ recorded with a limited number of points (52 points on average for each impact test). The reduced number
854
+ of points only allows for validation of the global behavior of the impact case scenario. Energy-time and the
855
+ force-displacement curves are calculated by integrating once and twice the experimental force history curve.
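+ A minimal sketch of this data reduction (not from the original work; it assumes the recorded force history F(t),
+ the striker mass and the impact velocity are available, and uses a trapezoidal rule for the successive integrations):
+     import numpy as np
+     def reduce_impact_history(t, force, mass, v0, g=9.81):
+         # striker acceleration from Newton's second law (impact direction positive)
+         acc = g - force / mass
+         dt = np.diff(t)
+         vel = v0 + np.concatenate(([0.0], np.cumsum(0.5 * (acc[1:] + acc[:-1]) * dt)))
+         disp = np.concatenate(([0.0], np.cumsum(0.5 * (vel[1:] + vel[:-1]) * dt)))
+         # absorbed energy as the running integral of force over displacement
+         energy = np.concatenate(([0.0], np.cumsum(0.5 * (force[1:] + force[:-1]) * np.diff(disp))))
+         return vel, disp, energy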
856
+ 16
857
+
858
+ Figure 7: Experimental and numerical curves for the 10J impact on the coupon made of T800S/M21 material. (a) Impact
+ force-time. (b) Impact force-displacement. (c) Energy-time.
863
+ As we can see both in Fig. 7 and Tab. 2, a good agreement is obtained between the experiments and the
+ numerical predictions. On the one hand, the proposed contact algorithm combined with the proposed
+ damage models captures the maximum impact force and the maximum displacement well,
+ with errors below 10% in both cases. On the other hand, the dissipated energies obtained in
+ the experiments show a large scatter, which makes a fair comparison
+ between the predicted value of 1.1 J and the experimental mean value difficult.
870
+                                           Experiment             Prediction   Difference (%)
+                                           Mean       Std.
+ Maximum impact force, fc_max (kN)         5.3        0.2         5.2          -1.0
+ Maximum displacement, dmax (mm)           5.4        0.1         4.9          -9.3
+ Dissipated energy, Edis (J)               0.2        0.3         1.1          >10
+ Table 2: Comparison of numerical results with experimental data for the impact case scenario of the plate made of T800S/M21
+ material.
893
+ 3.2.2. Coupon made of AS4/8552 material
894
+ This second case consists of a coupon made with the AS4/8552 material.
895
+ The plate has a stacking
896
+ sequence of [454/04/ − 454/904]S with a nominal ply thickness of 0.181 mm, resulting in a plate thickness of
897
+ 5.8 mm. This case study has higher energy (19.3 J) than the previous one, and it also includes clusters of four
898
+ and eight plies, which are prone to extensive matrix cracks and delaminations. The energy of 19.3 J also
+ falls within the BVID regime. The in-plane element sizes used in [17] and [51] are 0.3 mm and 0.5 mm, respectively.
900
+ In the present work, two element sizes are studied using the mesh refinement algorithm described in Sec. 2.4,
901
+ see Tab. 3. The base mesh for the plate has a total of 335 622 hexahedron elements (≈ 1 million of Degrees
902
+ Of Freedom (DOF)). The total time for the simulation is set to 5.0 ms. In this case, the striker has a mass
903
+ 17
904
+
905
+ of 5 kg, and its radius is 8 mm. The initial velocity of the striker considering the gap previously mentioned
906
+ is 2.78 m/s.
907
+ Refinement        Element      No. elements          No. elem.     No. nodes     Initial stable
+ level, ndivi      size (mm)    through ply cluster   plate         plate         time increment (s)
+ 0                 0.7250       1                     335 622       364 320       7.378 × 10−8
+ 1                 0.3625       2                     2 109 624     2 219 983     3.515 × 10−8
+ Table 3: Element sizes used on the coupon made of AS4/8552 material system and initial stable time increment for each case
+ study.
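+ A crude order-of-magnitude check of the initial stable time increments in Tab. 3 (not in the original; it assumes a
+ simple one-dimensional wave-speed estimate based on E11 and the density of Tab. 1, whereas Alya evaluates the
+ critical time step from the actual 3-D element formulation):
+     import math
+     rho = 1.59e-9                      # density, t/mm^3 (Tab. 1, AS4/8552)
+     e11 = 128.0e3                      # longitudinal modulus, MPa (Tab. 1, AS4/8552)
+     c = math.sqrt(e11 / rho)           # ~9.0e6 mm/s, crude 1-D wave speed
+     for le in (0.7250, 0.3625):        # element sizes of Tab. 3, mm
+         print(le, le / c)              # ~8.1e-8 s and ~4.0e-8 s: halving the element size halves the increment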
933
+ The numerical predictions of the impact force-displacement and energy - time curves are shown in Fig.
934
+ 8 and Fig. 9 respectively, using different element sizes. The most important physics variables for a proper
935
+ validation are summarized in Tab. 4. This table compares the experimental results from [51] with the
936
+ numerical predictions.
937
+ Case                            fc_del (kN)   fc_max (kN)   dmax (mm)   Edis (J)   Aproj_del (mm2)
+ Experiment [17]                 4.41          7.74          3.72        12.03      3898.3
+ Numerical (le = 0.7250 mm)      4.20          8.70          3.60        7.70       4723.1
+ Numerical (le = 0.3625 mm)      4.30          8.30          3.70        7.90       5249.20
+ Table 4: Comparison of the numerical results obtained with the proposed framework with experimental data from [51]. fc_del is
+ the delamination threshold force, fc_max is the maximum contact force, dmax is the maximum indentation, Edis is the dissipated
+ energy and Aproj_del is the projected delamination area.
972
+ The initial elastic deflection of the plate is very well captured for all the meshes (Fig. 8), meaning that
973
+ the stiffness of the plate is accurately predicted by the PDN contact algorithm. After that, delamination
974
+ onset occurs at the top of the elastic part, around 4.5 kN. This point is also very well captured by the
+ interlaminar damage model using cohesive elements between each ply cluster. Then, a combination
+ of interlaminar and intralaminar damage develops until the striker reaches both the maximum load and the maximum
+ displacement, which are predicted accurately, as also shown in Tab. 4. Once damage appears, the
+ continuum damage models and the characterization of the material properties play a fundamental role in the
+ simulation of this benchmark case. Although the delamination threshold, maximum force and displacement
+ are very well captured, the dissipated energy and the projected delamination area are overpredicted, see
981
+ Tab. 4. According to Soto et al. [51], the projected delamination and the corresponding energy dissipated
982
+ could be considerably improved when using solid elements with one integration point for the bulk material
983
+ and cohesive contact surfaces instead of cohesive elements to be able to better predict the delamination
984
+ shapes at each interface of the layup.
985
+ 18
986
+
987
+ Figure 8: Numerical prediction of the force-displacement curve using two element sizes and correlation with the experiment
988
+ from Gonz´alez et al. [17].
989
+ Figure 9: Numerical prediction of the impact energy vs. time using two element sizes and correlation with the experiment from
990
+ Gonz´alez et al. [17].
991
+ Fig. 10 depicts and aims to quantify the most important failure mechanisms that appear on the plate.
992
+ Fiber damage is represented by damage variable D1, which includes both fiber breakage and fiber kinking,
993
+ see Fig. 10a. As we can see, this source of damage is not the most predominant and mostly appears at the
994
+ bottom of the striker. Matrix cracking is represented with the damage variable D2, which includes matrix
995
+ tension and compression (Fig. 10b). Finally, the last source of damage is delamination (Fig. 10c). Its
996
+ prediction is compared with the shape obtained from the experiment, which is represented in dashed lines.
997
+ As we discussed previously, this source of damage is overpredicted for all the element sizes studied (see Tab.
+ 4), and further research would be required in that direction, since the values of the material properties and
+ the damage models play a fundamental role. Furthermore, the extensive matrix cracks and delamination
+ predicted for this impact case scenario corroborate the experimental observations by González et al. [18] on
+ the effect of ply clustering in originating extensive matrix cracks and large delaminations.
1004
+ [Figure 10 annotations: experimental projected delamination area 3898.3 mm2; numerical projected delamination area 5248.2 mm2; scale bar 10 mm; damage fields D1, D2 and Dcoh.]
+ Figure 10: Numerical prediction of the damage occurred in the coupon. (a) Fiber damage, D1. (b) Matrix cracking, D2. (c)
+ Projected delamination, Dcoh. The numerical results correspond to the most refined mesh.
1017
+ 3.3. Parallel performance
1018
+ The speedup and the parallel efficiency of the proposed contact algorithm for solving low-velocity impact
1019
+ events are evaluated in this section. All the executions are conducted on the MareNostrum4 supercomputer. A
+ strong scalability analysis has been conducted using a larger mesh than the ones studied in Sec. 3.2. The
+ model corresponds to the AS4/8552 impact case scenario. The new mesh has a total of 74M elements with
+ 228M of DOF, which results from a base mesh of 1 472 328 elements using two levels of the mesh refinement
+ algorithm. Strong scalability consists of fixing the mesh and solving the problem with different numbers of
+ processors (Central Processing Units, CPUs). The strong speedup is calculated as t0/tN, while the parallel
+ efficiency is calculated as (t0 N0)/(tN N), where N is the number of processors, tN is the simulation time with
+ N processors, and t0 is the reference simulation time for N0 processors. The number of processors used for this
+ analysis ranges from 192 to 2400. Due to
1031
+ the resolution of the problem following a multibody/multicode approach, the number of processors for the
1032
+ striker is fixed to 16 (sufficiently for its mesh) while the number of processors for the plate is changed. It
1033
+ is worth mentioning that the strong computational effort falls in the resolution (deformation) of the plate
1034
+ and the localization and exchange of information phases, as explained in [19]. Due to the small time step
1035
+ in this simulation, 9.261 × 10−9 s, the simulations for the scalability curve are limited to the first 7460 time
+ steps. The end of the execution (last time step) corresponds to an impact force of approximately 1 kN, which
1069
+ falls into the linear elastic regime of the force-displacement curve shown in Fig. 8. The strong speedup and
1070
+ parallel efficiency are shown in Fig. 11. The ideal scalability and efficiency are represented with a dashed
1071
+ line.
1072
+ [Figure 11 shows the strong speedup and the parallel efficiency versus the total number of processors, together with the average number of elements per core.]
+ Figure 11: Strong scalability of the low velocity impact test with a plate mesh of 74M hexahedron elements. The model
+ corresponds to the benchmark case using AS4/8552 material system.
1080
+ The results obtained in Fig. 11 show that the explicit analysis scales very
+ well up to 2400 processors using a mesh of 74M elements. The parallel efficiency is maintained above 90%,
+ which demonstrates the good scalability of the proposed framework for dealing with large-scale problems. This
+ nearly linear behavior is also shown in Tab. 5, where we summarize the total CPU time for each execution using a
+ different number of processors while keeping the size of the problem fixed.
1085
+ 21
1086
+
1087
+ No. of CPUs          192     384     768     1536    1824    2064    2400
+ CPU time (hh:mm)     17:20   08:33   04:15   02:11   01:51   01:39   01:27
+ Table 5: Total CPU time expressed in hh:mm for different executions of the low-velocity impact simulation considering a
+ fixed mesh of 74M of elements (228M of DOF) with a total of 7460 time steps. This CPU time includes the preprocess part,
+ where two mesh refinement levels are performed and the solution of the contact problem within the elastic regime of the
+ force-displacement curve.
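+ A short cross-check of the above-90% parallel efficiency using the CPU times of Tab. 5 and the speedup and
+ efficiency definitions given earlier (illustrative script, not part of the original work):
+     times = {192: "17:20", 384: "08:33", 768: "04:15", 1536: "02:11",
+              1824: "01:51", 2064: "01:39", 2400: "01:27"}    # hh:mm from Tab. 5
+     def minutes(hhmm):
+         h, m = hhmm.split(":")
+         return 60 * int(h) + int(m)
+     n0, t0 = 192, minutes(times[192])                         # reference run
+     for n, hhmm in times.items():
+         tn = minutes(hhmm)
+         speedup = t0 / tn                                     # t0/tN
+         efficiency = t0 * n0 / (tn * n)                       # t0*N0/(tN*N)
+         print(n, round(speedup, 2), f"{100.0 * efficiency:.1f}%")   # e.g. 2400 cores -> ~12.0, ~95.6%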
1106
+ It is also worth mentioning that the application of the proposed contact algorithm in explicit dynam-
1107
+ ics improves both the speedup and the parallel efficiency in comparison to an implicit resolution for the
1108
+ deformable body (plate), as already studied by the authors in [19]. This improvement in computational per-
1109
+ formance is mainly attributed to the time integration scheme for the deformable body. In explicit dynamics,
1110
+ it is not required to invert the global matrix of the system. In this case, the unknown is the acceleration, and
1111
+ the system is solved directly using the lumped mass matrix and the global force vector on the right-hand
1112
+ side. The reader is referred to [6] for more details.
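+ A generic sketch of why no global solve is needed with a lumped mass matrix (illustrative only; the actual explicit
+ scheme used in Alya differs in its details):
+     import numpy as np
+     def explicit_step(u, v, m_lumped, f_ext, f_int, dt):
+         # with a diagonal (lumped) mass matrix the acceleration is a simple vector division
+         a = (f_ext - f_int) / m_lumped
+         v_new = v + dt * a
+         u_new = u + dt * v_new
+         return u_new, v_new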
1113
+ 4. Conclusions
1114
+ In this paper, we apply the parallel PDN contact algorithm to simulate low-velocity impact events on
1115
+ fiber-reinforced polymer composites using a High-Performance Computing environment. Existing damage
1116
+ models from the literature have been implemented in our multiphysics finite element code Alya to simulate
1117
+ the material damage. Moreover, we introduce a new capability in the in-house mesh refinement algorithm to
1118
+ deal with cohesive elements and other element types, such as continuum shell elements. This is particularly attractive
+ because the finite element mesh can be refined at the beginning of the simulation at a very low computational
+ cost.
1121
+ We validate the whole framework with several benchmark tests. The last example corresponds to a
1122
+ well-known low-velocity impact test following the ASTM standard for damage resistance analysis. In this
1123
+ case, we study two impact case scenarios with two different material systems: the T800S/M21 and the
1124
+ AS4/8552, obtaining excellent predictions of the impact behavior and a reasonably good prediction of the damage extent compared
+ to experimental data from the literature. Additionally, the mesh refinement algorithm’s capabilities have
1126
+ been demonstrated for the plate made of AS4/8552 material.
1127
+ Finally, we evaluate the parallel performance of the impact simulation. Despite not using ”very” large
1128
+ meshes for the physics validation cases, we have generated a new larger mesh using the mesh refinement
1129
+ algorithm. The reason behind this is the stable time increment, which becomes smaller as the element size
1130
+ decreases. The new mesh has 74M hexahedron elements (228M of DOF) using full integration. An excellent
1131
+ computational efficiency (above 90%) has been obtained up to 2400 CPUs, demonstrating its applicability
1132
+ to solve large mesh models ranging from micro-scale to macro-scale.
1133
+ 22
1134
+
1135
+ A further conclusion of this work is that we demonstrate the potential application of the parallel PDN
1136
+ contact algorithm for low-velocity impact events and its parallel efficiency for large models compared to
1137
+ traditional Penalty or Lagrange contact-based methods. As we commented previously, we use full integration
1138
+ elements for all the examples; the use of reduced integration elements, which are more appropriate for
1139
+ explicit schemes and overcome the well-known locking pathologies from solid brick elements, can considerably
1140
+ increase the speedup of the simulations. Moreover, the localization of contact nodes and the communication
+ between the subdomains created by the domain decomposition method are a crucial issue for further research, as
+ they are the main bottleneck regarding the computational efficiency of contact algorithms.
1143
+ Acknowledgements
1144
+ This work has received funding from the Clean Sky 2 Joint Undertaking (JU) under grant agreements
1145
+ No. 807083 and No. 945521 (SHERLOC project). The JU receives support from the European Union’s
1146
+ Horizon 2020 research and innovation program and the Clean Sky 2 JU members other than the Union.
1147
+ The authors gratefully acknowledge Hellenic Aerospace Industry for manufacturing of the coupons made
1148
+ of T800S/M21 material and Kirsa Mu˜noz and Miguel ´Angel Jim´enez from Element Materials Technology
1149
+ Seville for conducting the experimental impact tests and providing all the experimental data. A. Quintanas-
1150
+ Corominas acknowledges financial support from the European Union-NextGenerationEU and the Ministry
1151
+ of Universities and Recovery, Transformation and Resilience Plan of the Spanish Government through a call
1152
+ of the University of Girona (grant REQ2021-A-30). G. Guillamet thankfully acknowledges the computer
1153
+ resources at MareNostrum and the technical support provided by Barcelona Supercomputing Center (FI-
1154
+ 2019-2-0010). Last but not least, the authors would also like to thank the late Claudio Lopes for all the
1155
+ interesting discussions and contributions to the simulation of impact events and damage on composites.
1156
+ Appendix A. Algorithms
1157
+ Here we summarize the main algorithms of the whole modeling framework to solve low-velocity impact
1158
+ events for damage resistance of fiber-reinforced polymer composites by making use of High-Performance
1159
+ Computing.
1160
+ 23
1161
+
1162
+ Algorithm 1 Main code for the partial Dirichlet-Neumann (PDN) contact algorithm.
1163
+ This PDN contact algorithm is treated as a coupling problem between two or more body instances.
1164
+ In the present algorithm,
1165
+ we describe the contact algorithm between two code instances: a rigid body represented by the domain Ωa and the deformable
1166
+ body represented by the domain Ωb.
1167
+ The coupling is performed through the exchange of boundary conditions at the contact
1168
+ interface following a Gauss-Seidel strategy. At each time step, contact detection is done for both instances, and synchronization
1169
+ and localization is executed. When contact is detected (at least one boundary node belonging to the deformable body is penetrated
1170
+ inside the rigid body), the rigid one computes and sends to the deformable body all the information required for the enforcement
1171
+ of the kinematic boundary conditions.
1172
+ The reader is referred to the Ph.D. thesis of Rivero [46], or to [19], for more details on the
1173
+ implementation aspects of the proposed contact algorithm.
1174
+ Require: Ωa, Ωb
1175
+ 1: loop time
+ 2:    Compute time step, tn+1
+ 3:    loop reset
+ 4:       if Rigid body, Ωa then
+ 5:          Contact detection (localization)                    ▷ Contact detection & localization, Algo. 1 in [19]
+ 6:          Exchange data: receive f cont from Ωb               ▷ Exchange & communication data, Algo. 2 in [19]
+ 7:          call calculateProjections()                         ▷ Projections & local coordinate system, Algo. 3 in [19]
+ 8:          call RK4Scheme()                                    ▷ Solve system
+ 9:          Exchange data: send projection data to Ωb           ▷ Exchange & communication data, Algo. 2 in [19]
+ 10:      end if
+ 11:      if Deformable body, Ωb then
+ 12:         Contact detection (localization)                    ▷ Contact detection & localization, Algo. 1 in [19]
+ 13:         Exchange data: receive data (projections) from Ωa   ▷ Algo. 2 in [19]
+ 14:         call EssentialBoundaryCondition()                   ▷ Contact nodes & Dirichlet condition, Algo. 4 in [19]
+ 15:         call ExplicitScheme()                               ▷ Solve system
+ 16:         call ReleaseNodes()                                 ▷ Algo. 2
+ 17:         Exchange: send f cont to Ωa                         ▷ Exchange & communication data, Algo. 2 in [19]
+ 18:      end if
+ 19:      if kfl reset = 0 then
+ 20:         exit loop reset
+ 21:      end if
+ 22:   end loop
+ 23: end loop
1230
+ Algorithm 2 ReleaseNodes() algorithm for explicit time integration schemes
1231
+ This algorithm is executed concurrently for each subdomain at the end of the time step tn+1. The flag kfl reset triggers
+ the repetition of the current time step tn+1 when adhesion contact nodes exist. The sign of the contact force is checked according
+ to Eq. 2. The key flag to release the adhesion nodes is called kfl nodes to release. The adhesion contact nodes are then released
+ (as free non-contacting nodes), and the time step is repeated by activating the reset key flag. As all the subdomains need to know
+ whether the time step has to be repeated or not, the MPI_MAX reduction collects the value of the reset flag from all the subdomains of the
+ mesh.
+ 1: kfl reset ← 0
+ 2: Get contact force f c and mark adhesion nodes
+ 3: if kfl nodes to release then
+ 4:    Adhesion nodes are set to free nodes
+ 5:    kfl reset ← 1
+ 6: end if
+ 7: call MPI_MAX(kfl reset)
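+ A sketch of how this release check could look with mpi4py (an illustration of Algorithm 2, not the Alya
+ implementation; the tensile-force criterion below stands in for the sign check of Eq. 2, and names are illustrative):
+     from mpi4py import MPI
+     import numpy as np
+     def release_nodes(comm, contact_force, adhesion_mask):
+         kfl_reset = 0
+         # nodes whose contact force has become tensile (adhesion) must be released
+         to_release = adhesion_mask & (contact_force > 0.0)
+         if np.any(to_release):
+             adhesion_mask[to_release] = False      # set back to free, non-contacting nodes
+             kfl_reset = 1
+         # all subdomains must agree on repeating the time step: reduce with MAX
+         return comm.allreduce(kfl_reset, op=MPI.MAX)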
1246
+ 24
1247
+
1248
+ Algorithm 3 Recursive mesh multiplication algorithm
1249
+ The level of mesh refinement is set with the parameter ndivi. ne, nn and nb are the total number of elements, nodes and boundaries
1250
+ of the new mesh. The same parameters with the superscript 0 indicate the initial dimension of the mesh. In order to define the
1251
+ dimensions of the new mesh, it is necessary to know the total number of edges (nedgg) and faces (nfacg) of the initial mesh. Then,
+ once the dimensions are known, the DivideMesh() subroutine is in charge of the following actions: i) divide each edge and
+ face of the initial mesh, ii) define the new element connectivities, iii) define the new element boundary connectivities and iv)
+ assign the material codes and the corresponding fields, such as material coordinate systems. The last step of the mesh division
+ algorithm is to reconstruct the interface domains through the ReconstructInterfaceDomains() subroutine. The reader is referred
+ to Houzeaux et al. [22] for more details on the implementation and parallel aspects of the proposed algorithm.
+ Input n0_e, n0_n, n0_b, ndivi
+ Output ne, nn, nb
+ 1: for idivi = 1, ndivi do
+ 2:    nedgg = GetEdges()
+ 3:    nfacg = GetFaces()
+ 4:    ne, nn, nb = GetDimensions()              ▷ Eq. 14
+ 5:    call DivideMesh()                         ▷ Sec. 3.2 [22]
+ 6:    call ReconstructInterfaceDomains()        ▷ Sec. 3.2 [22]
+ 7: end for
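+ To give an idea of how fast the mesh grows with ndivi, a small sketch (not from the original; it assumes that each
+ solid hexahedron is split into 8 children per level while each zero-thickness cohesive element is only divided
+ in-plane into 4 children, an assumption that is consistent with the growth factor of about 6.3 between the two meshes of Tab. 3):
+     def refined_element_count(n_solid, n_cohesive, ndivi):
+         # uniform division: solids x8 per level, zero-thickness cohesives x4 per level (assumption)
+         for _ in range(ndivi):
+             n_solid, n_cohesive = 8 * n_solid, 4 * n_cohesive
+         return n_solid + n_cohesive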
1277
+ Algorithm 4 Workflow of the intralaminar damage model.
1278
+ The strain and stress tensors, ε and σ, are defined in the material coordinate system using compact notation [6]. The superscripts
1279
+ n and n + 1 define the past and current time steps, respectively. The subscripts N indicate the four damage mechanisms associated
1280
+ with the loading function φN and internal threshold variables rN (fibre breaking, fibre kinking, tensile matrix cracking, and
1281
+ compressive matrix cracking).
1282
+ In turn, the subscript M indicates the five uniaxial damage states DM, represented in 2.
1283
+ The
1284
+ required material properties are i) elastic properties (E11, E22, ν12, ν23, G12 ), ii) ply strengths (XT , XC, YT , YC, SL), iii) fracture
1285
+ toughness (GXT , GXC, GY T , GY C, GSL) associated with the damage mechanism, and iv) yield strength and hardening (Sp, Kp);
1286
+ all these properties can be obtained through standardised tests or computational micromechanics simulations [30]. The required
1287
+ parameters are: the characteristic element length ℓc [33] and the state variables at the past time step, i.e. εn_p and rn_M. At the initial time
+ step, the state variables are initialised as εn_p = 0 and rn_M = 1.
+ Input εn+1, εn_p, rn_M, ℓc, material properties
+ Output σn+1, εn+1_p, rn+1_M
+ 1: εn+1_p(εn_p)                        ▷ Plastic strains, yield function in [51]
+ 2: εn+1_e ← εn+1 − εn+1_p              ▷ Effective elastic strains
+ 3: σn+1_e ← H−1 · εn+1_e               ▷ Effective compliance matrix H in [30]
+ 4: φn+1_M(σn+1_e)                      ▷ Loading functions (failure criteria), Eqs. 8, 13, 20, 21 in [32]
+ 5: rn+1_N(φn+1_N, rn_N)                ▷ Damage thresholds, Eqs. 24, 26 in [32]
+ 6: Dn+1_M(rn+1_N)                      ▷ Damage state variables according to [51] and Eq. 6 in [32]
+ 7: σn+1 ← H−1(Dn+1_M) · εn+1_e         ▷ Nominal compliance matrix H(Dn+1_M) in [30]
1341
+ 25
1342
+
1343
+ Algorithm 5 Workflow of the cohesive zone model.
1344
+ The displacement jumps and interface tractions, ∆ = {∆1, ∆2, ∆3}T and τ = {τ1, τ2, τ3}T , are defined at the mid-plane being
1345
+ 1 and 2 tangential and 3 normal directions. The superscripts t and t + 1 define the past and current time steps, respectively. In
1346
+ turn, the subscript M indicates the pure-mode I and II openings associated with the opening directions, I ↔ {3} and II ↔ {1, 2}.
1347
+ The latter is also referred with the subscript sh in [56]. The required input parameters are i) onset displacement jumps (∆Mo),
1348
+ ii) critical displacement jumps (∆Mc), iii) penalty stiffness (KM), and iv) Benzeggagh-Kenane exponent for the mixed-mode ratio
1349
+ (η). The onset and critical jumps can be obtained from the cohesive strengths (τM) and fracture toughness material properties by
1350
+ ∆Mo = τM/KM and ∆Mc = 2GM/τM, respectively. The damage threshold state variable at the past time step, rn_D, which is initialised
+ at the initial time step as rn_D = 0, is also required to evaluate the model.
+ Input ∆n+1, rn_D, material properties
+ Output τn+1, rn+1_D
+ 1: Kn+1_B(∆n+1)                        ▷ Local mixed-mode penalty stiffness, Eq. 13 in [56]
+ 2: Bn+1(∆n+1)                          ▷ Local mixed-mode ratio, Eq. 17 in [56]
+ 3: λn+1_o(Bn+1, Kn+1_B)                ▷ Local mixed-mode onset jump, Eq. 26 in [56]
+ 4: λn+1_c(Bn+1, Kn+1_B, λn+1_o)        ▷ Local mixed-mode propagation jump, Eq. 24 in [56]
+ 5: λn+1(∆n+1)                          ▷ Local mixed-mode equivalent jump, Eq. 12 in [56]
+ 6: Hn+1(λn+1, λn+1_o, λn+1_c)          ▷ Loading function (failure criteria), Eq. 20 in [56]
+ 7: rn+1_D(Hn+1, rn_D)                  ▷ Damage threshold, Eq. 21 in [56]
+ 8: Dn+1(rn+1_D, λn+1_o, λn+1_c)        ▷ Damage state, Eq. 20 in [56]
+ 9: τn+1_coh(Dn+1, ∆n+1)                ▷ Cohesive tractions, Eq. 7 in [56]
+ 10: τn+1_con(∆n+1)                     ▷ Contact tractions, Eq. 8 in [56]
+ 11: τn+1 ← τn+1_coh + τn+1_con
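+ For illustration, a pure mode I bilinear traction-separation response built from the AS4/8552 interface data of
+ Tab. 1 (a sketch only: it interprets Kcoh as a penalty stiffness in MPa/mm and omits the mixed-mode interaction
+ and Benzeggagh-Kenane blending of [56] that the model above actually uses):
+     def mode_i_traction(delta, k=2.5e4, tau_i=74.2, g_ic=0.28):
+         d_onset = tau_i / k              # onset jump, Delta_o = tau_I / K
+         d_final = 2.0 * g_ic / tau_i     # critical jump, Delta_c = 2 G_Ic / tau_I
+         if delta <= d_onset:
+             return k * delta             # undamaged (or compressive penalty) branch
+         if delta >= d_final:
+             return 0.0                   # fully debonded
+         damage = d_final * (delta - d_onset) / (delta * (d_final - d_onset))
+         return (1.0 - damage) * k * delta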
1408
+ References
1409
+ [1] Abrate, S., 1994. Impact on Laminated Composites: Recent Advances. Applied Mechanics Reviews 47, 517–544. doi:10.
1410
+ 1115/1.3111065.
1411
+ [2] Allix, O., Corigliano, A., 1996.
1412
+ Modeling and simulation of crack propagation in mixed-modes interlaminar fracture
1413
+ specimens. International Journal of Fracture 77, 111–140. doi:10.1007/BF00037233.
1414
+ [3] Ansys Mechanical APDL, . ANSYS Inc., Help system, Theory Reference Chapter 4: Structures with Material Nonlinear-
1415
+ ities.
1416
+ [4] Bathe, K., Chaudhary, A., 1985. A solution method for planar and axisymmetric contact problems. International Journal
1417
+ for Numerical Methods in Engineering 21, 65–85. doi:10.1016/b978-1-85617-802-0.00007-4.
1418
+ [5] Baˇzant, Z.P., Oh, B.H., 1983.
1419
+ Crack band theory for fracture of concrete.
1420
+ Mat´eriaux et Construction 16, 155–177.
1421
+ doi:10.1007/BF02486267.
1422
+ [6] Belytschko, T., Kam Liu, W., Moran, B., Elkhodary, K.I., 2014. Nonlinear Finite Elements for Continua and Structures.
1423
+ 1. second edi ed., Wiley. doi:10.1088/1751-8113/44/8/085201.
1424
+ [7] Bisagni, C., Vescovini, R., D´avila, C.G., 2011.
1425
+ Single-Stringer Compression Specimen for the Assessment of Damage
1426
+ Tolerance of Postbuckled Structures. Journal of Aircraft 48, 495–502. doi:10.2514/1.c031106.
1427
+ [8] Borrell, R., Cajas, J.C., Mira, D., Taha, A., Koric, S., V´azquez, M., Houzeaux, G., 2018. Parallel mesh partitioning based
1428
+ on space filling curves. Computers and Fluids 173, 264–272. doi:10.1016/j.compfluid.2018.01.040.
1429
+ [9] de Borst, R., Remmers, J.J.C., 2006. Computational modelling of delamination. Composites Science and Technology 66,
1430
+ 713–722. doi:10.1016/j.compscitech.2004.12.025. advances in statics and dynamics of delamination.
1431
+ [10] Bouvet, C., Castani´e, B., Bizeul, M., Barrau, J.J., 2009. Low velocity impact modelling in laminate composite panels
1432
+ with discrete interface elements. International Journal of Solids and Structures 46, 2809–2821. doi:10.1016/j.ijsolstr.
1433
+ 2009.03.010.
1434
+ 26
1435
+
1436
+ [11] Camanho, P.P., Maim´ı, P., D´avila, C.G., 2007. Prediction of size effects in notched laminates using continuum damage
1437
+ mechanics. Composites Science and Technology 67, 2715–2727. doi:10.1016/j.compscitech.2007.02.005.
1438
+ [12] Carreras, L., Guillamet, G., Quintanas-Corominas, A., Renart, J., Turon, A., 2021. Mesoscale modelling of delamination
1439
+ using the cohesive zone model approach, in: Van Paepegem, W. (Ed.), Multi-Scale Continuum Mechanics Modelling of
1440
+ Fibre-Reinforced Polymer Composites. Woodhead Publishing. Woodhead Publishing Series in Composites Science and
1441
+ Engineering, pp. 555–577. doi:10.1016/b978-0-12-818984-9.00018-4.
1442
+ [13] Cheng, Z.Q., Tan, W., Xiong, J.J., 2022. Modelling pre-fatigue, low-velocity impact and post-impact fatigue behaviours of
1443
+ composite helicopter tail structures under multipoint coordinated loading spectrum. Thin-Walled Structures 176, 109349.
1444
+ doi:10.1016/j.tws.2022.109349.
1445
+ [14] Fournier, Y., 2014. Parallel location and exchange. Technical Report. ´Electricite de France (EDF).
1446
+ [15] Furtado, C., Catalanotti, G., Arteiro, A., Gray, P.J., Wardle, B.L., Camanho, P.P., 2019. Simulation of failure in laminated
1447
+ polymer composites: Building-block validation.
1448
+ Composite Structures 226, 111168.
1449
+ doi:10.1016/j.compstruct.2019.
1450
+ 111168.
1451
+ [16] Gallego, F.J., Anza, J.J., 1989. A mixed finite element model for the elastic contact problem. International Journal for
1452
+ Numerical Methods in Engineering 28, 1249–1264. doi:10.1002/nme.1620280603.
1453
+ [17] Gonz´alez, E.V., Maim´ı, P., Camanho, P.P., Turon, A., Mayugo, J.A., 2012.
1454
+ Simulation of drop-weight impact and
1455
+ compression after impact tests on composite laminates. Composite Structures 94, 3364–3378. doi:10.1016/j.compstruct.
1456
+ 2012.05.015.
1457
+ [18] Gonz´alez, E.V., Maim´ı, P., Camanho, P.P., Lopes, C.S., Blanco, N., 2011. Effects of ply clustering in laminated composite
1458
+ plates under low-velocity impact loading. Composites Science and Technology 71, 805–817. doi:10.1016/j.compscitech.
1459
+ 2010.12.018.
1460
+ [19] Guillamet, G., Rivero, M., Zavala-Ak´e, M., V´azquez, M., Houzeaux, G., Oller, S., 2022. A parallel algorithm for unilateral
1461
+ contact problems. Computers and Structures 271, 106862. doi:10.1016/j.compstruc.2022.106862.
1462
+ [20] Guillamet, G., Turon, A., Costa, J., Linde, P., 2016. A quick procedure to predict free-edge delamination in thin-ply
1463
+ laminates under tension. Engineering Fracture Mechanics 168, 28–39. doi:10.1016/j.engfracmech.2016.01.019. modeling
1464
+ of fracture and damage in composite materials.
1465
+ [21] Har, J., Fulton, R.E., 2003. A parallel finite element procedure for contact-impact problems. Engineering with Computers
1466
+ 19, 67–84. doi:10.1007/s00366-003-0252-4.
1467
+ [22] Houzeaux, G., de la Cruz, R., Owen, H., V´azquez, M., 2013.
1468
+ Parallel Uniform Mesh Multiplication Applied To A
1469
+ Navier–stokes Solver. Computers and Fluids 80, 142–151. doi:10.1016/j.compfluid.2012.04.017. selected contributions
1470
+ of the 23rd International Conference on Parallel Fluid Dynamics ParCFD2011.
1471
+ [23] International, A., 2020. ASTM D7136/D7136M-20, Standard Test Method for Measuring the Damage Resistance of a
1472
+ Fiber-Reinforced Polymer Matrix Composite to a Drop-Weight Impact Event.
1473
+ [24] Krause, R.H., Wohlmuth, B.I., 2002. A Dirichlet-Neumann type algorithm for contact problems with friction. Computing
1474
+ and Visualization in Science doi:10.1007/s00791-002-0096-2.
1475
+ [25] Lapeer, R., Gerikhanov, Z., Sadulaev, S.M., Audinis, V., Rowland, R., Crozier, K., Morris, E., 2019. A computer-based
1476
+ simulation of childbirth using the partial Dirichlet–Neumann contact method with total Lagrangian explicit dynamics on
1477
+ the GPU. Biomechanics and Modeling in Mechanobiology 18, 681–700. doi:10.1007/s10237-018-01109-x.
1478
+ [26] Llobet, J., Maim´ı, P., Essa, Y., de la Escalera, F.M., 2021a. A continuum damage model for composite laminates: Part
1479
+ III - Fatigue. Mechanics of Materials 153, 103659. doi:10.1016/j.mechmat.2020.103659.
1480
+ [27] Llobet, J., Maim´ı, P., Turon, A., Bak, B.L.V., Lindgaard, E., Carreras, L., Essa, Y., de la Escalera, F.M., 2021b. A
1481
+ continuum damage model for composite laminates: Part IV- Experimental and numerical tests. Mechanics of Materials
1482
+ 154, 103686. doi:10.1016/j.mechmat.2020.103686.
1483
+ 27
1484
+
1485
+ [28] Lopes, C.S., Camanho, P.P., G¨urdal, Z., Maim´ı, P., Gonz´alez, E.V., 2009. Low-velocity impact damage on dispersed
1486
+ stacking sequence laminates. Part II: Numerical simulations. Composites Science and Technology 69, 937–947. doi:10.
1487
+ 1016/j.compscitech.2009.02.015.
1488
+ [29] Lopes, C.S., Gonz´alez, C., Falc´o, O., Naya, F., LLorca, J., Tijs, B., 2016a. Multiscale virtual testing: the roadmap to
1489
+ efficient design of composites for damage resistance and tolerance. CEAS Aeronautical Journal 7, 607–619. doi:10.1007/
1490
+ s13272-016-0210-7.
1491
+ [30] Lopes, C.S., S´adaba, S., Gonz´alez, C., Llorca, J., Camanho, P.P., 2016b. Physically-sound simulation of low-velocity impact
1492
+ on fiber reinforced laminates. International Journal of Impact Engineering 92, 3–17. doi:10.1016/j.ijimpeng.2015.05.014.
1493
+ impact Loading on Lightweight Structures.
1494
+ [31] Maheo, L., Grolleau, V., Rio, G., 2009. Damping efficiency of the Tchamwa-Wielgosz explicit dissipative scheme under
1495
+ instantaneous loading conditions. Comptes Rendus - Mecanique 337, 722–732. doi:10.1016/j.crme.2009.10.005.
1496
+ [32] Maim´ı, P., Camanho, P.P., Mayugo, J.A., D´avila, C.G., 2007a. A continuum damage model for composite laminates: Part
1497
+ I - Constitutive model. Mechanics of Materials 39, 897–908. doi:10.1016/j.mechmat.2007.03.005.
1498
+ [33] Maim´ı, P., Camanho, P.P., Mayugo, J.A., D´avila, C.G., 2007b. A continuum damage model for composite laminates: Part
1499
+ II - Computational implementation and validation. Mechanics of Materials 39, 909–919. doi:10.1016/j.mechmat.2007.
1500
+ 03.006.
1501
+ [34] Malone, J.G., Johnsok, K.L., 1994. A parallel finite element contact/impact algorithm for non-linear explicit transient
1502
+ analysis: Part I - The search algorithm and contact mechanics. International Journal for Numerical Methods in Engineering
1503
+ 37, 559–590. doi:10.1002/nme.1620370403.
1504
+ [35] Malone, J.G., Johnson, N.L., 1994. A parallel finite element contact/impact algorithm for non-linear explicit transient
1505
+ analysis: Part II-Parallel implementation. International Journal for Numerical Methods in Engineering doi:10.1002/nme.
1506
+ 1620370404.
1507
+ [36] Marlett, K., 2011. Hexcel 8552 AS4 Unidirectional Prepreg at 190gsm and 35% RC Qualification Material Property Data
1508
+ Report. Technical Report. National institute for aviation research.
1509
+ [37] van der Meer, F.P., 2012. Mesolevel Modeling of Failure in Composite Laminates: Constitutive, Kinematic and Algorithmic
1510
+ Aspects. Archives of Computational Methods in Engineering 19, 381–425. doi:10.1007/s11831-012-9076-y.
1511
+ [38] Nguyen, M.Q., Elder, D.J., Bayandor, J., Thomson, R.S., Scott, M.L., 2005. A review of explicit finite element software
1512
+ for composite impact analysis. Journal of Composite Materials 39, 375–386. doi:10.1177/0021998305046739.
1513
+ [39] Ortiz, M., Pandolfi, A., 1999. Finite-deformation irreversible cohesive elements for three-dimensional crack-propagation
1514
+ analysis.
1515
+ Numerical Methods in Engineering 44, 1267–1282.
1516
+ doi:10.1002/(SICI)1097-0207(19990330)44:9<1267::
1517
+ AID-NME486>3.0.CO;2-7.
1518
+ [40] Plagianakos, T., Mu˜noz, K., Guillamet, G., Prentzias, V., Quintanas-Corominas, A., Jimenez, M., Karachalios, E., 2020.
1519
+ Assessment of CNT-doping and hot-wet storage aging effects on Mode I, II and I/II interlaminar fracture toughness of a UD
1520
+ Graphite/Epoxy material system. Engineering Fracture Mechanics 224, 106761. doi:10.1016/j.engfracmech.2019.106761.
1521
+ [41] Quintanas-Corominas, A., Maim´ı, P., Casoni, E., Turon, A., Mayugo, J.A., Guillamet, G., V´azquez, M., 2018. A 3D
1522
+ transversally isotropic constitutive model for advanced composites implemented in a high performance computing code.
1523
+ European Journal of Mechanics - A/Solids 71, 278–291. doi:10.1016/j.euromechsol.2018.03.021.
1524
+ [42] Quintanas-Corominas, A., Turon, A., Reinoso, J., Casoni, E., Paggi, M., Mayugo, J.A., 2020. A phase field approach
1525
+ enhanced with a cohesive zone model for modeling delamination induced by matrix cracking.
1526
+ Computer Methods in
1527
+ Applied Mechanics and Engineering 358, 112618. doi:10.1016/j.cma.2019.112618.
1528
+ [43] Reinoso, J., Bl´azquez, A., Estefani, A., Par´ıs, F., Ca˜nas, J., 2013.
1529
+ A composite runout specimen subjected to ten-
1530
+ sion–compression loading conditions: Experimental and global–local finite element analysis. Composite Structures 101,
1531
+ 274–289. doi:10.1016/j.compstruct.2012.12.056.
1532
+ 28
1533
+
1534
+ [44] Reinoso, J., Bl´azquez, A., T´avara, L., Par´ıs, F., Arellano, C., 2016. Damage tolerance of composite runout panels under
1535
+ tensile loading. Composites Part B: Engineering 96, 79–93. doi:10.1016/j.compositesb.2016.03.083.
1536
+ [45] Reinoso, J., Paggi, M., 2014.
1537
+ A consistent interface element formulation for geometrical and material nonlinearities.
1538
+ Comput Mech 54, 1569–1581. doi:10.1007/s00466-014-1077-2.
1539
+ [46] Rivero, M., 2018. A Parallel Algorithm for Deformable Contact Problems. Phd thesis. Universitat Polit`ecnica de Catalunya,
1540
+ Escola T`ecnica Superior d’Enginyers de Camins, Canals i Ports de Barcelona.
1541
+ [47] Sarrado, C., Leone, F.A., Turon, A., 2016. Finite-thickness cohesive elements for modeling thick adhesives. Engineer-
1542
+ ing Fracture Mechanics 168, 105–113.
1543
+ doi:10.1016/j.engfracmech.2016.03.020. modeling of fracture and damage in
1544
+ composite materials.
1545
+ [48] Sasikumar, A., Costa, J., Trias, D., Llobet, J., C´ozar, I.R., Turon, A., Linde, P., 2020. A virtual testing based search
1546
+ for optimum compression after impact strength in thin laminates using ply-thickness hybridization and unsymmetrical
1547
+ designs. Composites Science and Technology 196, 108188. doi:10.1016/j.compscitech.2020.108188.
1548
+ [49] Schellekens, J.C.J., De Borst, R., 1993. On the numerical integration of interface elements. International Journal for
1549
+ Numerical Methods in Engineering 36, 43–66. doi:10.1002/nme.1620360104.
1550
+ [50] Sommer, D.E., Thomson, D., Falc´o, O., Quino, G., Cui, H., Petrinic, N., 2022.
1551
+ Damage modelling of carbon fibre
1552
+ composite crush tubes: Numerical simulation and experimental validation of drop weight impact. Composites Part A:
1553
+ Applied Science and Manufacturing 160, 107033. doi:10.1016/j.compositesa.2022.107033.
1554
+ [51] Soto, A., Gonz´alez, E.V., Maim´ı, P., Mayugo, J.A., Pasquali, P.R., Camanho, P.P., 2018a. A methodology to simulate
1555
+ low velocity impact and compression after impact in large composite stiffened panels. Composite Structures 204, 223–238.
1556
+ doi:10.1016/j.compstruct.2018.07.081.
1557
+ [52] Soto, A., Gonz´alez, E.V., Maim´ı, P., Mart´ın de la Escalera, F., Sainz de Aja, J.R., Alvarez, E., 2018b. Low velocity impact
1558
+ and compression after impact simulation of thin ply laminates. Composites Part A: Applied Science and Manufacturing
1559
+ 109, 413–427. doi:10.1016/j.compositesa.2018.03.017.
1560
+ [53] Tan, W., Falzon, B.G., Chiu, L.N.S., Price, M., 2015. Predicting low velocity impact damage and Compression-After-
1561
+ Impact (CAI) behaviour of composite laminates. Composites Part A: Applied Science and Manufacturing 71, 212–226.
1562
+ doi:10.1016/j.compositesa.2015.01.025.
1563
+ [54] Turon, A., Camanho, P.P., Costa, J., D´avila, C.G., 2006. A damage model for the simulation of delamination in advanced
1564
+ composites under variable-mode loading. Mechanics of Materials 38, 1072–1089. doi:10.1016/j.mechmat.2005.10.003.
1565
+ [55] Turon, A., D´avila, C.G., Camanho, P.P., Costa, J., 2007. An engineering solution for mesh size effects in the simulation of
1566
+ delamination using cohesive zone models. Engineering Fracture Mechanics 74, 1665–1682. doi:10.1016/j.engfracmech.
1567
+ 2006.08.025.
1568
+ [56] Turon, A., Gonz´alez, E.V., Sarrado, C., Guillamet, G., Maim´ı, P., 2018.
1569
+ Accurate simulation of delamination under
1570
+ mixed-mode loading using a cohesive model with a mode-dependent penalty stiffness. Composite Structures 184, 506–511.
1571
+ doi:10.1016/j.compstruct.2017.10.017.
1572
+ [57] V´azquez, M., Houzeaux, G., Koric, S., Artigues, A., Aguado-Sierra, J., Ar´ıs, R., Mira, D., Calmet, H., Cucchietti, F.,
1573
+ Owen, H., Taha, A., Burness, E.D., Cela, J.M., Valero, M., 2016.
1574
+ Alya: Multiphysics engineering simulation toward
1575
+ exascale. Journal of Computational Science 14, 15–27. doi:10.1016/j.jocs.2015.12.007.
1576
+ [58] Weyler, R., Oliver, J., Sain, T., Cante, J.C., 2012. On the contact domain method: A comparison of penalty and Lagrange
1577
+ multiplier implementations. Computer Methods in Applied Mechanics and Engineering 205-208, 68–82. doi:10.1016/j.
1578
+ cma.2011.01.011. special Issue on Advances in Computational Methods in Contact Mechanics.
1579
+ [59] Yastrebov, V.A., 2011. Computational Contact Mechanics. Phd thesis. MINES ParisTech. doi:10.1017/CBO9781107415324.
1580
+ 004.
1581
+ [60] Yastrebov, V.A., Breitkopf, P., 2013. Numerical Methods in Contact Mechanics. doi:10.1002/9781118647974.
1582
+ 29
1583
+
1584
+ [61] Zavala-Ak´e, J.M., 2018. A high-performance computing coupling tool for partitioned multi-physics applications. Phd
1585
+ thesis. Universitat Polit`ecnica de Catalunya, Departament de F´ısica.
1586
+ 30
1587
+
F9E5T4oBgHgl3EQfVg_E/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
FNE0T4oBgHgl3EQfQwDu/content/tmp_files/2301.02199v1.pdf.txt ADDED
@@ -0,0 +1,684 @@
1
+ arXiv:2301.02199v1 [math.GR] 5 Jan 2023
2
+ On the Generalized Fitting Height and Nonsoluble
3
+ Length of the Mutually Permutable Products of Finite
4
+ Groups∗
5
+ Viachaslau I. Murashka1,2 and Alexander F. Vasil’ev3,2
6
+ Abstract
7
+ The generalized Fitting height h∗(G) of a finite group G is the least number h such
+ that F∗(h)(G) = G, where F∗(0)(G) = 1, and F∗(i+1)(G) is the inverse image of the generalized
+ Fitting subgroup F∗(G/F∗(i)(G)). Let p be a prime and let 1 = G0 ≤ G1 ≤ · · · ≤ G2h+1 = G
+ be the shortest normal series in which for i odd the factor Gi+1/Gi is p-soluble (possibly
+ trivial), and for i even the factor Gi+1/Gi is a (non-empty) direct product of nonabelian
+ simple groups. Then h = λp(G) is called the non-p-soluble length of the group G. We prove
+ that if a finite group G is a mutually permutable product of subgroups A and B, then
+ max{h∗(A), h∗(B)} ≤ h∗(G) ≤ max{h∗(A), h∗(B)} + 1 and max{λp(A), λp(B)} = λp(G).
+ We also introduce and study the non-Frattini length.
20
+ Keywords: Finite group; generalized Fitting subgroup; mutually permutable product
21
+ of groups; generalized Fitting height; non-p-soluble length; Plotkin radical.
22
+ 1
23
+ Introduction and the Main Results
24
+ All groups considered here are finite. E.I. Khukhro and P. Shumyatsky introduced and
25
+ studied interesting invariants of a group: the generalized Fitting height and the nonsoluble
26
+ length [11–13]. The first one is the extension of the well known Fitting height to the class of
27
+ all groups and the second one implicitly appeared in [8,20].
28
+ Definition 1.1 (Khukhro, Shumyatsky). (1) The generalized Fitting height h∗(G) of a finite
29
+ group G is the least number h such that F∗(h)(G) = G, where F∗(0)(G) = 1, and F∗(i+1)(G) is the
+ inverse image of the generalized Fitting subgroup F∗(G/F∗(i)(G)).
35
+ (2) Let p be a prime, 1 = G0 ≤ G1 ≤ · · · ≤ G2h+1 = G be the shortest normal series
36
+ in which for i odd the factor Gi+1/Gi is p-soluble (possibly trivial), and for i even the factor
37
+ Gi+1/Gi is a (non-empty) direct product of nonabelian simple groups. Then h = λp(G) is called
38
+ the non-p-soluble length of a group G.
39
+ (3) Recall that λ2(G) = λ(G) is the nonsoluble length of a group G.
40
+ In [12] E.I. Khukhro and P. Shumyatsky showed that in the general case the generalized
41
+ Fitting height of a factorized group is not bounded in terms of the generalized Fitting heights
42
+ of factors. The same holds for the nonsoluble length.
43
+ Recall [1, Definition 4.1.1] that a group G is called a mutually permutable product of its
44
+ subgroups A and B if G = AB, A permutes with every subgroup of B and B permutes with
45
+ 1email: mvimath@yandex.ru
46
+ 2Francisk Skorina Gomel State University, Gomel, Belarus
47
+ 3email: formation56@mail.ru
48
+ ∗Supported by BFFR Φ23PHΦ-237
49
+ 1
50
+
51
+ every subgroup of A. Products of mutually permutable subgroups are a very interesting
+ topic in the theory of groups (for example, see [1, Chapter 4]).
53
+ The main result of our paper is
54
+ Theorem 1.1. Let a group G be the product of the mutually permutable subgroups A and B.
55
+ Then
56
+ (1) max{h∗(A), h∗(B)} ≤ h∗(G) ≤ max{h∗(A), h∗(B)} + 1.
57
+ (2) max{λp(A), λp(B)} = λp(G) for any prime p. In particular, max{λ(A), λ(B)} = λ(G).
58
+ If a group G is soluble, then h∗(G) = h(G) is the Fitting height of a group G.
59
+ Corollary 1.2 ([10]). If a soluble group G is the product of the mutually permutable subgroups
60
+ A and B, then max{h(A), h(B)} ≤ h(G) ≤ max{h(A), h(B)} + 1.
61
+ Example 1.1. Note that the symmetric group S3 of degree 3 is the mutually permutable product
62
+ of the cyclic groups Z2 and Z3 of orders 2 and 3 respectively. Hence h∗(S3) = max{h∗(Z2), h∗(Z3)}+
63
+ 1 = max{h(Z2), h(Z3)} + 1.
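+ As a computational aside (not part of the original paper), the assertion of Example 1.1 that S3 is the mutually
+ permutable product of Z2 and Z3 can be checked by brute force on permutations:
+     from itertools import permutations
+     S3 = set(permutations(range(3)))
+     compose = lambda p, q: tuple(p[q[i]] for i in range(3))        # (p o q)(i) = p(q(i))
+     prod = lambda X, Y: {compose(x, y) for x in X for y in Y}      # set product XY
+     A = {(0, 1, 2), (1, 0, 2)}                                     # Z2, generated by a transposition
+     B = {(0, 1, 2), (1, 2, 0), (2, 0, 1)}                          # Z3, the 3-cycles
+     subgroups_A, subgroups_B = [{(0, 1, 2)}, A], [{(0, 1, 2)}, B]  # all subgroups (prime order)
+     assert prod(A, B) == S3                                        # G = AB
+     assert all(prod(A, H) == prod(H, A) for H in subgroups_B)      # A permutes with every subgroup of B
+     assert all(prod(B, H) == prod(H, B) for H in subgroups_A)      # B permutes with every subgroup of A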
64
+ 2
65
+ The Functorial Method
66
+ According to B.I. Plotkin [15] a functorial is a function γ which assigns to each group G its
67
+ characteristic subgroup γ(G) satisfying f(γ(G)) = γ(f(G)) for any isomorphism f : G → G∗.
68
+ We are interested in functorials with some properties:
69
+ (F1) f(γ(G)) ⊆ γ(f(G)) for every epimorphism f : G → G∗.
70
+ (F2) γ(N) ⊆ γ(G) for every N ⊴ G.
71
+ (F3) γ(G) ∩ N ⊆ γ(N) for every N ⊴ G.
72
+ Remark 2.1. (0) Functions F∗ and Rp that assign to every group respectively its the generalized
73
+ Fitting subgroup and the p-soluble radical are examples of functorials. It is well known that they
74
+ satisfy (F1), (F2), (F3).
75
+ (1) Recall that a functorial γ is called a Plotkin radical if it satisfies (F1), is idempotent (i.e.
76
+ γ(γ(G)) = γ(G)) and N ⊆ γ(G) for every γ(N) = N ⊴ G [5, p. 28].
77
+ (2) A functorial that satisfies (F3) is often called hereditary (nevertheless, the same word
78
+ means something different in the theory of classes of groups).
79
+ (3) A functorial γ is a hereditary Plotkin radical if and only if it satisfies (F1), (F2), (F3).
80
+ Let us prove it. Assume that γ is a hereditary Plotkin radical. We only need to prove that it satisfies
81
+ (F2). If N ⊴ G, then γ(N) char N ⊴ G. So γ(N) ⊴ G. Now γ(N) = γ(γ(N)) ⊆ γ(G). Thus
82
+ a hereditary Plotkin radical satisfies (F1), (F2), (F3). Assume that γ satisfies (F1), (F2), (F3).
83
+ We need only to prove that it is idempotent. By (F3) we have γ(G) = γ(G) ∩ G ⊆ γ(γ(G)) ⊆
84
+ γ(G). Thus γ(γ(G)) = γ(G).
85
+ (4) The functorial Φ which assigns to every group G its Frattini subgroup Φ(G) satisfies
86
+ (F1) and (F2) but not (F3).
87
+ (5) If γ satisfies (F2) and (F3), then γ(G) ∩ N = γ(N) for every group G and N ⊴ G.
88
+ Lemma 2.1. If γ satisfies (F1) and (F2), then γ(G1 × G2) = γ(G1) × γ(G2) for any groups
89
+ G1 and G2.
90
+ Proof. From Gi ⊴ G1 × G2 it follows that γ(Gi) ⊆ γ(G1 × G2) by (F2) for i ∈ {1, 2}. Note
91
+ that γ(G1 × G2)Gi/Gi ⊆ γ((G1 × G2)/Gi) = (γ(G¯i) × Gi)/Gi by (F1) for i ∈ {1, 2}. Now
92
+ γ(G1 × G2) ⊆ (γ(G1 × G2)G2) ∩ (γ(G1 × G2)G1) ⊆
93
+ (γ(G1) × G2) ∩ (G1 × γ(G2)) = γ(G1) × γ(G2).
94
+ Thus γ(G1 × G2) = γ(G1) × γ(G2).
95
+ 2
96
+
97
+ Recall [15] that for functorials γ1 and γ2 the upper product γ2 ⋆ γ1 is defined by
98
+ (γ2 ⋆ γ1)(G)/γ2(G) = γ1(G/γ2(G)).
99
+ Proposition 2.2. Let γ1 and γ2 be functorials. If γ1 and γ2 satisfy (F1) and (F2), then γ2 ⋆γ1
100
+ satisfies (F1) and (F2). Moreover if γ1 and γ2 also satisfy (F3), then γ2 ⋆ γ1 satisfies (F3).
101
+ Proof. (1) γ2 ⋆ γ1 satisfies (F1).
102
+ Let f : G → f(G) be an epimorphism. From f(γ2(G)) ⊆ γ2(f(G)) it follows that the
103
+ following diagram is commutative.
104
+ [Commutative diagram: the natural epimorphisms f1 : G → G/γ2(G) and f3 : f(G) → f(G)/γ2(f(G)), the map
+ f2 : G/γ2(G) → f(G)/γ2(f(G)) induced by f, and the composite f4 = f2 ◦ f1 = f3 ◦ f : G → f(G)/γ2(f(G)).]
+ Let X = γ1(G/γ2(G)) and Y = γ1(f(G)/γ2(f(G))). Note that (γ2 ⋆ γ1)(G) = f1^{-1}(X) and
+ (γ2 ⋆ γ1)(f(G)) = f3^{-1}(Y) by the definition of γ2 ⋆ γ1. Since γ1 satisfies (F1), we see that
+ f2(X) ⊆ Y. Hence X ⊆ f2^{-1}(Y). Now (γ2 ⋆ γ1)(G) ⊆ f1^{-1}(f2^{-1}(Y)) = f4^{-1}(Y). So
+ f((γ2 ⋆ γ1)(G)) ⊆ f(f4^{-1}(Y)) = f3^{-1}(Y) = (γ2 ⋆ γ1)(f(G)).
142
+ Thus γ2 ⋆ γ1 satisfies (F1).
143
+ (2) γ2 ⋆ γ1 satisfies (F2).
144
+ Let N ⊴ G. From γ2(N) char N ⊴ G it follows that γ2(N) ⊴ G. Since γ2 satisfies (F2),
145
+ we see that γ2(N) ⊆ γ2(G). So the following diagram is commutative.
146
+ [Commutative diagram: the natural epimorphisms f1 : G → G/γ2(N) and f2 : G/γ2(N) → G/γ2(G), with the
+ composite f3 = f2 ◦ f1 : G → G/γ2(G).]
+ Let X = γ1(G/γ2(N)), Y = γ1(N/γ2(N)) and Z = γ1(G/γ2(G)). Note that (γ2 ⋆ γ1)(G) =
+ f3^{-1}(Z) and (γ2 ⋆ γ1)(N) ⊆ f1^{-1}(Y). Since γ1 satisfies (F1) and (F2), we see that f2(X) ⊆ Z
+ and Y ⊆ X. Now
+ (γ2 ⋆ γ1)(N) ⊆ f1^{-1}(Y) ⊆ f1^{-1}(X) ⊆ f1^{-1}(f2^{-1}(Z)) = f3^{-1}(Z) = (γ2 ⋆ γ1)(G).
174
+ Hence γ2 ⋆ γ1 satisfies (F2).
175
+ (3) If γ1 and γ2 also satisfy (F3), then γ2 ⋆ γ1 satisfies (F3).
176
+ Assume that γ1 and γ2 satisfy (F2) and (F3). Let N ⊴ G.
177
+ Since Nγ2(G)/γ2(G) ∩ (γ2 ⋆ γ1)(G)/γ2(G) ⊴ (γ2 ⋆ γ1)(G)/γ2(G) = γ1(G/γ2(G)), we see by
178
+ (5) of Remark 2.1 that
179
+ γ1((Nγ2(G) ∩ (γ2 ⋆ γ1)(G))/γ2(G)) = (Nγ2(G) ∩ (γ2 ⋆ γ1)(G))/γ2(G).
180
+ Note that
181
+ (Nγ2(G) ∩ (γ2 ⋆ γ1)(G))/γ2(G) =
182
+ (N ∩ (γ2 ⋆ γ1)(G))γ2(G)/γ2(G) ≃ (N ∩ (γ2 ⋆ γ1)(G))/(N ∩ γ2(G))
183
+ = (N ∩ (γ2 ⋆ γ1)(G))/γ2(N) ⊴ N/γ2(N).
184
+ It means that (N ∩ (γ2 ⋆ γ1)(G))/γ2(N) ⊆ γ1(N/γ2(N)). Thus N ∩ (γ2 ⋆ γ1)(G) ⊆ (γ2 ⋆ γ1)(N),
185
+ i.e γ2 ⋆ γ1 satisfies (F3).
186
188
+ Here we introduce the height hγ(G) of a group G which corresponds to a given functorial γ.
189
+ Definition 2.1. Let γ be a functorial. Then the γ-series of G is defined starting from γ(0)(G) =
190
+ 1, and then by induction γ(i+1)(G) = (γ(i) ⋆ γ)(G) is the inverse image of γ(G/γ(i)(G)). The
191
+ least number h such that γ(h)(G) = G is defined to be γ-height hγ(G) of G. If there is no such
192
+ number, then hγ(G) = ∞.
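+ For example, for γ = F, the Fitting-subgroup functorial, and G = S4 the γ-series is
+ 1 < γ(1)(S4) = V4 < γ(2)(S4) = A4 < γ(3)(S4) = S4,
+ so hF(S4) = 3; in this case hγ is the classical Fitting height.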
193
+ The following Lemma directly follows from Proposition 2.2.
194
+ Lemma 2.3. Let γ be a functorial. If γ satisfies (F1) and (F2), then γ(n) satisfies (F1) and
195
+ (F2) for all natural n. Moreover if γ satisfies (F3), then γ(n) satisfies (F3) for all natural n.
196
+ Lemma 2.4. Let γ be a functorial. If γ satisfies (F1) and (F2), then hγ(G/N) ≤ hγ(G) ≤
197
+ hγ(N) + hγ(G/N) for every N ⊴ G. Moreover, if γ also satisfies (F3), then hγ(N) ≤ hγ(G).
198
+ Proof. Note that γ(n) satisfies (F1) and (F2) for every n by Lemma 2.3.
199
+ Since γ(n) satisfies (F1), G/N = γ(hγ(G))(G)/N ≤ γ(hγ(G))(G/N) ≤ G/N. So γ(hγ(G))(G/N) =
200
+ G/N. Thus hγ(G/N) ≤ hγ(G).
201
+ Since γ(n) satisfies (F2), we see that N = γ(hγ(N))(N) ⊆ γ(hγ(N))(G). Note that hγ(G/γ(hγ(N))(G)) ≤
202
+ hγ(G/N). Thus hγ(G) ≤ hγ(N) + hγ(G/N).
203
+ Assume that γ also satisfies (F3).
204
+ Then γ(n) satisfies (F3) by Lemma 2.3.
205
+ Now N =
206
+ G ∩ N = γ(hγ(G))(G) ∩ N ⊆ γ(hγ(G))(N) ≤ N. So γ(hγ(G))(N) = N. Thus hγ(N) ≤ hγ(G).
207
+ If γ = F∗, then hγ(G) = h∗(G) for every group G. The non-p-soluble length can also be
208
+ defined with the help of functorials. Here by Rp(G) we denote the p-soluble radical of a group
209
+ G.
210
+ Lemma 2.5. Let Fp = Rp ⋆F∗ ⋆Rp and G be a non-p-soluble group. Then λp(G) is the smallest
211
+ natural i with Fp(i)(G) = G.
212
+ Proof. Let 1 = G0 ≤ G1 ≤ · · · ≤ G2h+1 = G be the shortest normal series in which for i odd the
213
+ factor Gi+1/Gi is p-soluble (possibly trivial), and for i even the factor Gi+1/Gi is a (non-empty)
214
+ direct product of nonabelian simple groups.
215
+ Note that G1 ≤ Rp(G) and G2/G1 is quasinilpotent. Hence G2Rp(G)/Rp(G) is quasinilpo-
216
+ tent. It means that G2Rp(G)/Rp(G) ≤ F∗(G/Rp(G)). Hence G2 ≤ (Rp ⋆ F∗)(G). Since G3/G2
217
+ is p-soluble, we see that G3(Rp ⋆ F∗)(G)/(Rp ⋆ F∗)(G) is p-soluble. Hence G3(Rp ⋆ F∗)(G)/(Rp ⋆
218
+ F∗)(G) ≤ Rp(G/(Rp ⋆ F∗)(G)). It means that G3 ≤ Fp(G) = Fp(1)(G).
219
+ Assume that we have proved that G2i+1 ≤ Fp(i)(G). Let us prove that G2(i+1)+1 ≤ Fp(i+1)(G).
220
+ From G2i+1 ≤ Fp(i)(G) it follows that G2i+1 ≤ (Fp(i) ⋆ Rp)(G).
221
+ Note that G2i+2/G2i+1
222
+ is quasinilpotent. It means that G2i+2(Fp(i) ⋆ Rp)(G)/(Fp(i) ⋆ Rp)(G) is quasinilpotent. Hence
223
+ G2i+2 ≤ ((Fp(i)⋆Rp)⋆F∗)(G). Since G2(i+1)+1/G2i+2 is p-soluble, we see that G2(i+1)+1(Fp(i)⋆Rp⋆
224
+ F∗)(G)/(Fp(i)⋆Rp⋆F∗)(G) is p-soluble. Hence G2(i+1)+1(Fp(i)⋆Rp⋆F∗)(G)/(Fp(i)⋆Rp⋆F∗)(G) ≤
225
+ Rp(G/(Fp(i) ⋆ Rp ⋆ F∗)(G)). It means that G2(i+1)+1 ≤ (Fp(i) ⋆ Rp ⋆ F∗ ⋆ Rp)(G) = Fp(i+1)(G).
226
+ Therefore λp(G) ≥ n where n is the smallest integer with Fp(n)(G) = G. Since Rp ⋆ Rp = Rp,
+ we see that Fp(n)(G) = G yields a normal series 1 ≤ F1 ≤ F2 ≤ · · · ≤ F2n+1 = G in which for i odd the
228
+ factor Fi+1/Fi is p-soluble (possibly trivial), and for i even the factor Fi+1/Fi is a (non-empty)
229
+ direct product of nonabelian simple groups. So λp(G) ≤ n. Thus λp(G) = n.
230
+ Now we are able to estimate the γ-height of the direct product subgroups and of the join
231
+ of subnormal subgroups:
232
234
+ Theorem 2.6. Let γ be a functorial that satisfies (F1) and (F2) and such that γ(H) > 1 for every
+ group H ̸= 1.
236
+ (1) If G = A1 × · · · × An is the direct product of its normal subgroups Ai, then
+ hγ(G) = max{hγ(Ai) | 1 ≤ i ≤ n}.
239
+ (2) Let G = ⟨Ai | 1 ≤ i ≤ n⟩ be the join of its subnormal subgroups Ai. Then hγ(G) ≤
240
+ max{hγ(Ai) | 1 ≤ i ≤ n}. If γ satisfies (F3), then hγ(G) = max{hγ(Ai) | 1 ≤ i ≤ n}.
241
+ Proof. Note that γ(n) satisfies (F1) and (F2) for every n by Proposition 2.2.
242
+ (1) From Lemma 2.1 it follows that if G = A1 × · · · × An, then γ(m)(G) = γ(m)(A1) × · · · × γ(m)(An)
+ for every m. It means that hγ(G) = max{hγ(Ai) | 1 ≤ i ≤ n}.
246
+ (2) Assume that G = ⟨Ai | 1 ≤ i ≤ n⟩ is the join of its subnormal subgroups Ai, h1 =
247
+ max{hγ(Ai) | 1 ≤ i ≤ n} and h2 = hγ(G). Since γ(n) satisfies (F2), we see that γ(n)(N) ⊆
248
+ γ(n)(G) for every subnormal subgroup N of G and every n. Now
249
+ G = ⟨Ai | 1 ≤ i ≤ n⟩ = ⟨γ(h1)(Ai) | 1 ≤ i ≤ n⟩ ⊆ γ(h1)(G) ⊆ G.
250
+ Hence γ(h1)(G) = G. It means that h2 ≤ h1.
251
+ Suppose that γ satisfies (F3). Now γ(n) satisfies (F3) for every n by Proposition 2.2. From
252
+ (5) of Remark 2.1 it follows that γ(n)(G) ∩ N = γ(n)(N) for every subnormal subgroup N of G.
253
+ Now Ai = Ai ∩ G = Ai ∩ γ(h2)(G) = γ(h2)(Ai). It means that hγ(Ai) ≤ h2 for every i. Hence
254
+ h1 ≤ h2. Thus h1 = h2.
255
+ Corollary 2.7. Let a group G = ⟨Ai | 1 ≤ i ≤ n⟩ be the join of its subnormal subgroups Ai.
256
+ Then h∗(G) = max{h∗(Ai) | 1 ≤ i ≤ n} and λp(G) = max{λp(Ai) | 1 ≤ i ≤ n}.
257
+ 3
258
+ The Classes of Groups Method
259
+ Recall that a formation is a class F of groups with the following properties: (a) every
260
+ homomorphic image of an F-group is an F-group, and (b) if G/M and G/N are F-groups, then
261
+ also G/(M ∩ N) ∈ F. Recall that the F-residual of a group G is the smallest normal subgroup
262
+ GF of G with G/GF ∈ F.
263
+ A formation is called Fitting if (a) from N ⊴ G ∈ F it follows that N ∈ F and (b) a group
264
+ G ∈ F whenever it is a product of normal F-subgroups. Recall that the F-radical GF of a group
265
+ G is the greatest normal F-subgroup.
266
+ The classes N∗ of all quasinilpotent groups and Sp of all p-soluble groups are Fitting for-
267
+ mations.
268
+ From [3, IX, Remarks 1.11 and Theorem 1.12] and [3, IV, Theorem 1.8] follows
269
+ Lemma 3.1. Let F and H be non-empty Fitting formations. Then
270
+ FH = (G | GF ∈ H) = (G | G/GH ∈ F)
271
+ is a Fitting formation.
272
+ Corollary 3.2. The class Hp = (G | Fp(G) = G) is a Fitting formation.
273
+ It is straightforward to check that for a Fitting formation F, the F-radical can be considered
274
+ as a functorial γ which satisfies (F1), (F2) and (F3). For convenience in this case denote hγ by
275
+ hF. Now h∗(G) = hF∗(G) = hN∗(G) and for a non-p-soluble group λp(G) = hFp(G) = hHp(G).
276
+ Lemma 3.3. Let F be a Fitting formation. If H ̸= 1 and hF(H) < ∞, then hF(HF) = hF(H)−1.
277
+ Proof. Let us prove that if H ̸= 1 and hF(H) < ∞, then hF(HF) = hF(H) − 1. Let hF(H) = n
278
+ and hF(HF) = k. Then HF(n−1)(H) < H and H/HF(n−1) ∈ F. It means that HF ≤ HF(n−1).
279
+ Since HF(n−1) satisfies (F3), we see that (HF)F(n−1) = HF. Hence k ≤ n − 1.
280
+ Note that HF = (HF)F(k) ≤ HF(k). It means that H/HF(k) ∈ F. Hence k ≥ n − 1. Thus
281
+ k = n − 1.
282
284
+ If F, H, K ̸= ∅ are formations, then (FH)K = F(HK) by [3, IV, Theorem 1.8]. That is why
285
+ the class Fn = FF · · · F (n factors) is a well-defined formation.
289
+ Lemma 3.4. For a natural number n and a Fitting formation F holds Fn = (G | hF(G) ≤ n).
290
+ Proof. From Lemma 3.3 it follows that if G ∈ (G | hF(G) ≤ n), then GFn = 1. It means that
291
+ (G | hF(G) ≤ n) ⊆ Fn. Assume that there is a group G ∈ Fn with hF(G) > n. Note that
+ GF ̸= G for every quotient group G ̸≃ 1 of G. Then hF(GFn) > 0 by Lemma 3.3. It means that
+ GFn ̸= 1, a contradiction. Therefore Fn ⊆ (G | hF(G) ≤ n). Thus Fn = (G | hF(G) ≤ n).
295
+ In the next lemma we recall the key properties of mutually permutable products.
296
+ Lemma 3.5. Let a group G = AB be a mutually permutable product of subgroups A and B.
297
+ Then
298
+ (1) [1, Lemma 4.1.10] G/N = (AN/N)(BN/N) is a mutually permutable product of sub-
299
+ groups AN/N and BN/N for every normal subgroup N of G.
300
+ (2) [1, Lemma 4.3.3(4)] If N is a minimal normal subgroup of a group G, then {N ∩A, N ∩
301
+ B} ⊆ {1, N}.
302
+ (3) [1, Lemma 4.3.3(5)] If N is a minimal normal subgroup of G contained in A and B∩N =
303
+ 1, then N ≤ CG(A) or N ≤ CG(B). If furthermore N is not cyclic, then N ≤ CG(B).
304
+ (4) [1, Theorem 4.3.11] AGBG ̸= 1.
305
+ (5) [1, Corollary 4.1.26] A′ and B′ are subnormal in G.
306
+ Recall that π(G) is the set of all prime divisors of |G|, π(F) = ∪G∈F π(G) and Nπ denotes the
+ class of all nilpotent π-groups.
309
+ Lemma 3.6. Let F be a Fitting formation. Assume that hF(G) ≤ h + 1 for every mutually
310
+ permutable product G of two F-subgroups. Then
311
+ max{hF(A), hF(B)} − 1 ≤ hF(G) ≤ max{hF(A), hF(B)} + h
312
+ for every mutually permutable product G of two subgroups A and B with hF(A), hF(B) < ∞.
313
+ Proof. If A = 1 or B = 1, then there is nothing to prove. Assume that A, B ̸= 1. Let a group
314
+ G = AB be the product of mutually permutable subgroups A and B. From hF(A), hF(B) < ∞
315
+ it follows that π(G) ⊆ π(F). According to [3, IX, Lemma 1.8] Nπ(F) ⊆ F. Note that A′ and B′
316
+ are subnormal in G by (5) of Lemma 3.5. Since HF ⊴ HNπ(F) ⊴ H′ holds for every π(F)-group
317
+ H, subgroups AF and BF are subnormal in G. Let C = ⟨AF, BF⟩G = ⟨{(AF)x | x ∈ G}∪{(BF)x |
318
+ x ∈ G}⟩. Then by (2) of Theorem 2.6 and by Lemma 3.3
319
+ hF(C) = max({hF((AF)x) | x ∈ G} ∪ {hF((BF)x) | x ∈ G}) = max{hF(AF), hF(BF)} = max{hF(A), hF(B)} − 1.
324
+ Now G/C = (AC/C)(BC/C) is a mutually permutable product of F-subgroups AC/C and
325
+ BC/C by (1) of Lemma 3.5. It means that hF(G/C) ≤ h + 1 by our assumption. With the
326
+ help of Lemma 2.4 we see that
327
+ hF(G) ≤ hF(C) + hF(G/C) ≤ max{hF(A), hF(B)} − 1 + 1 + h = max{hF(A), hF(B)} + h.
328
+ On the other hand, hF(G) ≥ hF(C) = max{hF(A), hF(B)} − 1 by (2) of Theorem 2.6.
329
+ Lemma 3.7. Let F be a Fitting formation. Assume that a group G is a group of least order such
+ that
331
+ (1) G is a mutually permutable product of two subgroups A and B with hF(A) ≥ hF(B);
332
+ (2) hF(G) = hF(A) − 1.
333
+ Then G has the unique minimal normal subgroup N, N ≤ A and hF(A/N) = hF(A) − 1.
334
336
+ Proof. Let N be a minimal normal subgroup of G. Then N ∩ A ∈ {N, 1} by (2) of Lemma 3.5.
337
+ Assume that N ∩ A = 1. Now G/N = (AN/N)(BN/N) is a mutually permutable product
338
+ of groups AN/N and BN/N by (1) of Lemma 3.5. By our assumption and hF(G) ≥ hF(G/N) ≥
339
+ hF(AN/N) = hF(A), a contradiction. Hence N ∩ A = N for every minimal normal subgroup N
340
+ of G.
341
+ Now hF(G) + 1 = hF(A) > hF(G) ≥ hF(G/N) ≥ hF(A/N) ≥ hF(A) − 1. It means that
342
+ hF(G) = hF(A/N) = hF(A) − 1.
343
+ If G has two minimal normal subgroups N1 and N2, then hF(A/N1) = hF(A/N2) = hF(A)−1.
344
+ It means that hF(A) ≤ hF(A) − 1 by Lemma 3.4, a contradiction. Hence G has a unique minimal
345
+ normal subgroup N.
346
+ 4
347
+ Proof of Theorem 1.1(1)
348
+ Our proof relies on the notion of the X-hypercenter. A chief factor H/K of G is called
349
+ X-central in G provided
350
+ (H/K) ⋊ (G/CG(H/K)) ∈ X
351
+ (see [18, p. 127–128] or [7, 1, Definition 2.2]). A normal subgroup N of G is said to be X-
352
+ hypercentral in G if N = 1 or N ̸= 1 and every chief factor of G below N is X-central. The
353
+ symbol ZX(G) denotes the X-hypercenter of G, that is, the product of all normal X-hypercentral
354
+ in G subgroups. According to [18, Lemma 14.1] or [7, 1, Theorem 2.6] ZX(G) is the largest
355
+ normal X-hypercentral subgroup of G. If X = N is the class of all nilpotent groups, then
356
+ ZN(G) = Z∞(G) is the hypercenter of G.
357
+ Lemma 4.1. Let n be a natural number.
358
+ Then (N∗)n = (G | h∗(G) ≤ n) = (G | G =
359
+ Z(N∗)n(G)).
360
+ Proof. First part follows from Lemma 3.4. It is well known that the class of all quasinilpotent
361
+ groups is a composition (or Baer-local, or solubly saturated) formation (see [2, Example 2.2.17]).
362
+ According to [18, Theorem 7.9] (N∗)n is a composition formation. Now (N∗)n = (G | G =
363
+ Z(N∗)n(G)) by [7, 1, Theorem 2.6].
364
+ For a normal section H/K of G the subgroup C∗G(H/K) = HCG(H/K) is called an inneriser
366
+ (see [2, Definition 1.2.2]). It is the set of all elements of G that induce inner automorphisms
367
+ on H/K.
368
+ From the definition of the generalized Fitting subgroup it follows that it is the
369
+ intersection of innerisers of all chief factors.
370
+ Lemma 4.2. Let N be a normal subgroup of a group G. If N is a direct product of isomorphic
371
+ simple groups and h∗(G/C∗G(N)) ≤ k − 1, then F∗(k)(G/N) = F∗(k)(G)/N.
375
+ Proof. Assume that h∗(G/C∗G(N)) ≤ k − 1. Let F/N = F∗(k)(G/N). Then F∗(k)(G) ⊆ F.
+ Now F/C∗F(N) ≃ FC∗G(N)/C∗G(N) ⊴ G/C∗G(N). Therefore h∗(F/C∗F(N)) ≤ k − 1. It means
+ that h∗(F/C∗F(H/K)) ≤ k − 1 for every chief factor H/K of F below N. Hence (H/K) ⋊
+ (F/CF(H/K)) ∈ (N∗)k for every chief factor H/K of F below N. It means that N ≤ Z(N∗)k(F).
+ Thus F ∈ (N∗)k by Lemma 4.1. So F ⊆ F∗(k)(G). Thus F∗(k)(G) = F.
393
+ Lemma 4.3. If a group G = AB is a product of mutually permutable quasinilpotent subgroups
394
+ A and B, then h∗(G) ≤ 2.
395
+ Proof. To prove this lemma we need only to prove that if a group G = AB is a product
396
+ of mutually permutable quasinilpotent subgroups A and B, then G ∈ (N∗)2 by Lemma 4.1.
397
+ Assume the contrary. Let G be a minimal order counterexample.
398
+ (1) G has a unique minimal normal subgroup N and G/N ∈ (N∗)2.
399
401
+ Note that G/N is a mutually permutable product of quasinilpotent subgroups (AN/N) and
402
+ (BN/N) by (1) of Lemma 3.5. Hence G/N ∈ (N∗)2 by our assumption. Since (N∗)2 is a
403
+ formation, we see that G has a unique minimal normal subgroup. According to (4) of Lemma
404
+ 3.5 AGBG ̸= 1. WLOG we may assume that G has a minimal normal subgroup N ≤ A.
405
+ (2) N ≤ A ∩ B.
406
+ Suppose that N ∩ B = 1. Then A ≤ CG(N) or B ≤ CG(N) by (3) of Lemma 3.5. If A ≤
407
+ CG(N), then N ⋊ G/CG(N) ≃ N ⋊ B/CB(N) ∈ (N∗)2. If B ≤ CG(N), then N ⋊ G/CG(N) ≃
408
+ N ⋊ A/CA(N) ∈ (N∗) ⊆ (N∗)2 by [2, Corollary 2.2.5]. In both cases N ≤ Z(N∗)2(G). It means
409
+ that G ∈ (N∗)2, a contradiction. Now N ∩ B ̸= 1. Hence N ≤ A ∩ B by (2) of Lemma 3.5.
410
+ (3) N is non-abelian.
411
+ Assume that N is abelian; then N is an elementary abelian p-group for some prime p. Since A
+ is quasinilpotent, we see that A/CA(N) is a p-group.
412
+ By analogy B/CB(N) is a p-group. Note that A/CA(N) ≃ ACG(N)/CG(N) and B/CB(N) ≃
413
+ BCG(N)/CG(N). From G = AB it follows that G/CG(N) is a p-group. Since N is a chief
414
+ factor of G, we see that G/CG(N) ≃ 1. So N ≤ Z∞(G) ≤ Z(N∗)2(G). Thus G ∈ (N∗)2, a
415
+ contradiction. It means that N is non-abelian.
416
+ (4) The final contradiction.
417
+ Now N is a direct product of minimal normal subgroups of A. Since A is quasinilpotent, we
418
+ see that every element of A induces an inner automorphism on every minimal normal subgroup
419
+ of A. Hence every element of A induces an inner automorphism on N.
420
+ By analogy every
421
+ element of B induces an inner automorphism on N.
422
+ From G = AB it follows that every
423
+ element of G induces an inner automorphism on N. So NCG(N) = G and G/CG(N) ≃ N. Now
424
+ N ⋊ (G/CG(N)) ∈ (N∗)2. It means that N ≤ Z(N∗)2(G). Thus G ∈ (N∗)2 and h∗(G) ≤ 2, the
425
+ final contradiction.
426
+ Proof of Theorem 1.1(1). Let a group G be a mutually permutable product of subgroups A
427
+ and B. From Theorem 2.6 and Lemma 4.3 it follows that
428
+ max{h∗(A), h∗(B)} − 1 ≤ h∗(G) ≤ max{h∗(A), h∗(B)} + 1.
429
+ Assume that max{h∗(A), h∗(B)} − 1 = h∗(G). WLOG let h∗(G) = h∗(A) − 1. We may
+ assume that G is a group of least order with such properties. Then G has the unique
431
+ minimal normal subgroup N, N ≤ A and h∗(A/N) = h∗(A) − 1 by Lemma 3.7.
432
+ Assume that h∗(A/C∗A(N)) < h∗(A) − 1. Then
+ F∗(h∗(A)−1)(A/N) = F∗(h∗(A)−1)(A)/N < A/N
+ by Lemma 4.2. It means that h∗(A) = h∗(A/N), a contradiction. Hence h∗(A/C∗A(N)) = h∗(A) − 1.
+ Since G/C∗G(N) = (AC∗G(N)/C∗G(N))(BC∗G(N)/C∗G(N)) is a mutually permutable product
+ of subgroups AC∗G(N)/C∗G(N) and BC∗G(N)/C∗G(N) by (1) of Lemma 3.5 and A/C∗A(N) ≃
+ AC∗G(N)/C∗G(N), we see that h∗(G/C∗G(N)) ≥ h∗(A/C∗A(N)) = h∗(A) − 1 by our assumptions.
+ Note that F∗(G) ≤ C∗G(N). Now h∗(G) − 1 = h∗(G/F∗(G)) ≥ h∗(G/C∗G(N)) ≥ h∗(A/C∗A(N)) =
+ h∗(A) − 1. It means that h∗(G) ≥ h∗(A), the final contradiction.
462
+ 5
463
+ Proof of Theorem 1.1(2)
464
+ Lemma 5.1. Let p be a prime and H = Hp. If a group G = AB is a product of mutually
465
+ permutable H-subgroups A and B, then G ∈ H.
466
+ Proof. Assume the contrary. Let G be a minimal order counterexample.
467
+ (1) G has a unique minimal normal subgroup N, G/N ∈ H and N is not p-soluble.
468
+ Note that G/N is a mutually permutable product of H-subgroups (AN/N) and (BN/N) by
469
+ (1) of Lemma 3.5. Hence G/N ∈ H by our assumption. Since H is a formation, we see that G
470
472
+ has a unique minimal normal subgroup. According to (4) of Lemma 3.5 AGBG ̸= 1. WLOG
473
+ we may assume that G has a minimal normal subgroup N ≤ A.
474
+ If N is p-soluble, then Fp(G)/N = Fp(G/N) = G/N, so Fp(G) = G. Thus G ∈ H, a
475
+ contradiction.
476
+ (2) N ≤ A ∩ B.
477
+ Suppose that N ∩ B = 1. Note that N is not cyclic by (1). Then B ≤ CG(N) by (3) of
478
+ Lemma 3.5. Hence N ⋊ G/CG(N) ≃ N ⋊ A/CA(N) ∈ H by [2, Corollary 2.2.5]. It means that
479
+ N ≤ ZH(G). Therefore G ∈ H, a contradiction. Now N ∩ B ̸= 1. Hence N ≤ A ∩ B by (2) of
480
+ Lemma 3.5.
481
+ (3) The final contradiction.
482
+ Since N is the unique minimal normal subgroup of G and non-abelian, we see that CG(N) =
483
+ 1. So CA(N) = CB(N) = 1. Hence Rp(A) = Rp(B) = 1. In particular F(A) = F(B) = 1.
484
+ Note that all minimal normal subgroups of A are in N. For B is the same situation. Thus
485
+ N = F∗(A) = F∗(B). So G/N is a mutually permutable product of p-soluble groups. Since the
486
+ class of all p-soluble groups is closed under extensions by p-soluble groups, G/N is p-soluble by (1)
487
+ and (4) of Lemma 3.5. From N ≤ F∗(G) it follows that G ∈ H, the contradiction.
488
+ Proof of Theorem 1.1(2). Let H = Hp and a group G be a mutually permutable product of
489
+ subgroups A and B. First we are going to prove that max{hH(A), hH(B)} = hH(G).
490
+ By Lemmas 3.6 and 4.3 we have
491
+ max{hH(A), hH(B)} − 1 ≤ hH(G) ≤ max{hH(A), hH(B)}.
492
+ Assume that max{hH(A), hH(B)} − 1 = hH(G) for some mutually permutable product G of
493
+ A and B. Assume that G is a minimal order group with this property. WLOG let hH(A) =
494
+ hH(G) − 1. Then G has the unique minimal normal subgroup N, N ≤ A and hH(A/N) =
495
+ hH(A) − 1 by Lemma 3.7.
496
+ If N is p-soluble, then Rp(A/N) = Rp(A)/N. It means that Fp(A/N) = Fp(A)/N. Thus
497
+ hH(A/N) = hH(A), a contradiction.
498
+ It means that Rp(G) = 1. Note that now N is a simple non-abelian group. Since N is a
499
+ unique minimal normal subgroup of G, we see that N = F∗(G). Now hH(G/N) = hH(G) − 1.
500
+ Therefore
501
+ hH(G) − 1 = hH(G/N) ≥ hH(A/N) = hH(A) − 1.
502
+ Thus hH(G) ≥ hH(A), the contradiction.
503
+ We proved that max{hH(A), hH(B)} = hH(G).
504
+ Let G be a mutually permutable product of groups A and B. If A, B are p-soluble, then
505
+ G is p-soluble by (1) and (4) of Lemma 3.5.
506
+ Hence λp(G) = λp(A) = λp(B) = 0.
507
+ Now
508
+ assume that at least one of subgroups A, B is not p-soluble. Then G is not p-soluble by (1)
509
+ and (4) of Lemma 3.5. WLOG let hH(A) ≥ hH(B). Hence A is not p-soluble. We proved
510
+ that hH(A) = hH(G). Note that hH(G) = λp(G), hH(A) = λp(A), hH(B) = λp(B) if B is not
511
+ p-soluble by Lemma 2.5 and 0 = λp(B) < 1 = hH(B) ≤ hH(A) = λp(A) otherwise. Thus
512
+ max{λp(A), λp(B)} = λp(G).
513
+ 6
514
+ Non-Frattini length
515
+ The Frattini subgroup Φ(G) plays an important role in the theory of classes of groups. One
516
+ of the useful properties of the Fitting subgroup of a soluble group is that it is strictly greater
517
+ than the Frattini subgroup of the same group. Note that the generalized Fitting subgroup is
518
+ non-trivial in every group but there are groups in which it coincides with the Frattini subgroup.
519
+ That is why the following length seems interesting.
520
522
+ Definition 6.1. Let 1 = G0 ≤ G1 ≤ · · · ≤ G2h = G be a shortest normal series in which for i
523
+ even Gi+1/Gi ≤ Φ(G/Gi), and for i odd the factor Gi+1/Gi is a (non-empty) direct product of
524
+ simple groups. Then h = ˜h(G) will be called the non-Frattini length of a group G.
525
+ Note that if G is a soluble group, then ˜h(G) = h(G). Another reason that leads us to this
526
+ length is the generalization of the Fitting subgroup ˜F(G) introduced by P. Schmid [16] and
527
+ L.A. Shemetkov [17, Definition 7.5] and defined by
528
+ Φ(G) ⊆ ˜F(G) and ˜F(G)/Φ(G) = Soc(G/Φ(G)).
529
+ P. F¨orster [4] showed that ˜F(G) can be defined by
530
+ Φ(G) ⊆ ˜F(G) and ˜F(G)/Φ(G) = F∗(G/Φ(G)).
531
+ Let Φ and ˜F be functorials that assign Φ(G) and ˜F(G) to every group G. Then ˜F = Φ ⋆ F∗. It
532
+ is well known that Φ satisfies (F1) and (F2). Hence ˜F satisfies (F1) and (F2) by Proposition
533
+ 2.2.
534
+ Note that Φ(G/Φ(G)) ≃ 1. By analogy with the proof of Lemma 2.5 one can show that the
535
+ non-Frattini length ˜h(G) of a group G and h˜F(G) coincide for every group G. The following
536
+ theorem shows connections between the non-Frattini length and the generalized Fitting height.
537
+ Theorem 6.1. For any group G holds ˜h(G) ≤ h∗(G) ≤ 2˜h(G). There exists a group H with
538
+ ˜h(H) = n and h∗(H) = 2n for any natural n.
539
+ Proof. Since Φ(G) and Soc(G/Φ(G)) are quasinilpotent, we see that F∗(G) ≤ ˜F(G) ≤ F∗(2)(G).
+ Now F∗(n)(G) ≤ ˜F(n)(G) ≤ F∗(2n)(G). Hence if F∗(n)(G) = G, then ˜F(n)(G) = G, and if
+ ˜F(n)(G) = G, then F∗(2n)(G) = G. It means ˜h(G) ≤ h∗(G) ≤ 2˜h(G).
547
+ Let K be a group, K1 be isomorphic to the regular wreath product of A5 and K. Note
548
+ that the base B of it is the unique minimal normal subgroup of K1 and non-abelian. According
549
+ to [6], there is a Frattini F3K1-module A which is faithful for K1 and a Frattini extension
550
+ A ֌ K2 ։ K1 such that A ≃ Φ(K2) (as K1-modules) and K2/Φ(K2) ≃ K1.
553
+ Let us denote K2 by f(K). Now f(K)/˜F(f(K)) ≃ K. From the definition of h˜F = ˜h it follows
554
+ that ˜h(f(K)) = ˜h(K) + 1.
555
+ Note that Φ(f(K)) ⊆ F∗(f(K)).
556
+ Assume that Φ(f(K)) ̸= F∗(f(K)).
557
+ It means that
558
+ F∗(f(K)) = ˜F(f(K)) is quasinilpotent. By [9, X, Theorem 13.8] it follows that Φ(f(K)) ⊆
559
+ Z(F∗(f(K))). It means that 1 < B ≤ CK1(A). Thus A is not faithful, a contradiction.
560
+ Thus Φ(f(K)) = F∗(f(K)) and f(K)/F∗(f(K)) ≃ K1.
561
+ Since K1 has a unique mini-
562
+ mal normal subgroup B and it is non-abelian, we see that F∗(K1) = B.
563
+ It means that f(K)/F∗(2)(f(K)) ≃ K. From the definition of h∗ it follows that h∗(f(K)) = h∗(K) + 2.
566
+ As usual, let f(1)(K) = f(K) and f(i+1)(K) = f(f(i)(K)).
567
+ Then ˜h(f(n)(1)) = n and
568
+ h∗(f(n)(1)) = 2n for any natural n.
569
+ The following proposition directly follows from Theorem 2.6.
570
+ Proposition 6.2. Let a group G = ⟨Ai | 1 ≤ i ≤ n⟩ be the join of its subnormal subgroups Ai.
571
+ Then ˜h(G) ≤ max{˜h(Ai) | 1 ≤ i ≤ n}.
572
+ One of the main differences between the non-Frattini length and the generalized Fitting
573
+ height is that the non-Frattini length of a normal subgroup can be greater than the non-Frattini
574
+ length of a group.
575
+ Example 6.1. Let E ≃ A5. There is an F5E-module V such that R = Rad(V ) is a faithful
576
+ irreducible F5E-module and V/R is an irreducible trivial F5E-module (how to construct such
577
+ module, for example, see [14]). Let G = V ⋋ E. Now Φ(G) = R by [3, B, Lemma 3.14]. Note
578
580
+ that G/Φ(G) = G/R ≃ Z5 × E. So ˜F(G) = G and ˜h(G) = 1. Note that G = V (RE) where V
581
+ and RE are normal subgroups of G. Since V is abelian, we see that ˜h(V ) = 1. Note that R
582
+ is a unique minimal normal subgroup of RE and Φ(RE) = 1. It means that ˜F(RE) = R and
583
+ ˜h(RE) = 2. Thus ˜h(G) < max{˜h(V ), ˜h(RE)} and ˜F does not satisfy (F3).
584
+ Recall [1, Definition 4.1.1] that a group G is called a totally permutable product of its
585
+ subgroups A and B if G = AB and every subgroup of A permutes with every subgroup of B.
586
+ Theorem 6.3. Let a group G = AB be a totally permutable product of subgroups A and B.
587
+ Then
588
+ max{˜h(A), ˜h(B)} − 1 ≤ ˜h(G) ≤ max{˜h(A), ˜h(B)} + 1.
589
+ Proof. If A = 1 or B = 1, then max{˜h(A), ˜h(B)} = ˜h(G). Assume that A, B ̸= 1.
590
+ According to [1, Proposition 4.1.16] A ∩ B ≤ F(G). Hence A ∩ B ≤ F∗(G). Now G =
591
+ G/F∗(G) is a totally permutable product of A = AF∗(G)/F∗(G) and B = BF∗(G)/F∗(G)
592
+ by [1, Corollary 4.1.11]. Note that A ∩ B ≃ 1. According to [1, Lemma 4.2.2] [A, B] ≤ F(G).
593
+ So [A, B] ≤ F∗(G). It means that
594
+ G/F∗(G) = (AF∗(G)/F∗(G)) × (BF∗(G)/F∗(G)).
595
+ Note that for the formation U of all supersoluble groups we have U ⊂ N2 ⊂ (N∗)2. Hence
596
+ if H = H1H2 is a product of totally permutable (N∗)2-subgroups H1 and H2, then H ∈ (N∗)2
597
+ by [1, Theorem 5.2.1]. Analyzing the proof of [1, Theorem 5.2.2] we see that this theorem is
598
+ true not only for saturated formation, but for formations F = (G | G = ZF(G)). In particular,
599
+ it is true for (N∗)2. Thus if H = H1H2 ∈ (N∗)2 is a product of totally permutable subgroups
600
+ H1 and H2, then H1, H2 ∈ (N∗)2. Now (N∗)2 satisfies conditions of [1, Proposition 5.3.9].
601
+ Therefore A ∩ F∗(2)(G) = F∗(2)(A) and B ∩ F∗(2)(G) = F∗(2)(B). Note that
+ AF∗(G)/F∗(G) ≃ AF∗(2)(G)/F∗(2)(G) ≃ A/F∗(2)(A).
+ By analogy BF∗(G)/F∗(G) ≃ B/F∗(2)(B). Hence
+ G/F∗(2)(G) ≃ (A/F∗(2)(A)) × (B/F∗(2)(B)).
+ By Theorem 2.6 and ˜h = h˜F we have ˜h(G/F∗(2)(G)) = max{˜h(A/F∗(2)(A)), ˜h(B/F∗(2)(B))}.
+ From ˜F(H) ≤ F∗(2)(H) ≤ ˜F(2)(H) and Lemma 2.4 it follows that for any group H ̸= 1
+ ˜h(H) − 1 = ˜h(H/˜F(H)) ≥ ˜h(H/F∗(2)(H)) ≥ ˜h(H/˜F(2)(H)) ≥ ˜h(H) − 2.
+ Therefore
+ {˜h(G) − ˜h(G/F∗(2)(G)), ˜h(A) − ˜h(A/F∗(2)(A)), ˜h(B) − ˜h(B/F∗(2)(B))} ⊆ {1, 2}.
629
+ Thus max{˜h(A), ˜h(B)} − 1 ≤ ˜h(G) ≤ max{˜h(A), ˜h(B)} + 1.
630
+ While proving Theorem 6.3 we were not able to answer the following question:
631
+ Question 6.1. Let a group G = AB be a totally permutable product of subgroups A and B. Is
632
+ max{˜h(A), ˜h(B)} ≤ ˜h(G)?
633
+ The following question also seems interesting.
+ Question 6.2. Does there exist a constant h with | max{˜h(A), ˜h(B)} − ˜h(G)| ≤ h for any
635
+ mutually permutable product G = AB of subgroups A and B?
636
+ D.A. Towers [19] defined and studied analogues of F∗(G) and ˜F(G) for Lie algebras. Using
637
+ these subgroups and the radical (of a Lie algebra) one can introduce the generalized Fitting
638
+ height, the non-soluble length and the non-Frattini length of a (finite dimension) Lie algebra.
639
+ Question 6.3. Estimate the generalized Fitting height, the non-soluble length and the non-
640
+ Frattini length of a (finite dimension) Lie algebra that is the sum of its two subalgebras (ideals,
641
+ subideals, mutually or totally permutable subalgebras).
642
644
+ References
645
+ [1] A. Ballester-Bolinches, R. Esteban-Romero, and M. Asaad. Products of Finite Groups. De
646
+ Gruyter, 2010.
647
+ [2] A. Ballester-Bolinches and L. M. Ezquerro. Classes of Finite Groups, volume 584 of Math.
648
+ Appl. Springer Netherlands, 2006.
649
+ [3] K. Doerk and T. O. Hawkes. Finite Soluble Groups, volume 4 of De Gruyter Exp. Math.
650
+ De Gruyter, Berlin, New York, 1992.
651
+ [4] P. Förster. Projektive Klassen endlicher Gruppen: IIa. Gesättigte Formationen: Ein allgemeiner
+ Satz von Gaschütz-Lubeseder-Baer-Typ. Publ. Mat. UAB, 29(2/3):39–76, 1985.
653
+ [5] B. J. Gardner and R. Wiegandt. Radical theory of rings. Marcel Dekker New York, 2003.
654
+ [6] R. L. Griess and P. Schmid. The Frattini module. Arch. Math., 30(1):256–266, 1978.
655
+ [7] W. Guo. Structure Theory for Canonical Classes of Finite Groups. Springer-Verlag, Berlin,
656
+ Heidelberg, 2015.
657
+ [8] P. Hall and G. Higman. On the p-Length of p-Soluble Groups and Reduction Theorems
658
+ for Burnside’s Problem. Proc. London Math. Soc., s3-6(1):1–42, 1956.
659
+ [9] B. Huppert and N. Blackburn. Finite groups III, volume 243 of Grundlehren Math. Wiss.
660
+ Springer-Verlag, Berlin, Heidelberg, 1982.
661
+ [10] E. Jabara. The Fitting length of a product of mutually permutable finite groups. Acta
662
+ Math. Hung., 159(1):206–210, 2019.
663
+ [11] E. I. Khukhro and P. Shumyatsky. Nonsoluble and non-p-soluble length of finite groups.
664
+ Isr. J. Math., 207(2):507–525, 2015.
665
+ [12] E. I. Khukhro and P. Shumyatsky. On the length of finite factorized groups. Ann. Mat.
666
+ Pura Appl., 194(6):1775–1780, 2015.
667
+ [13] E. I. Khukhro and P. Shumyatsky. On the length of finite groups and of fixed points. Proc.
668
+ Amer. Math. Soc., 143(9):3781–3790, 2015.
669
+ [14] V. I. Murashka. On one conjecture about supersoluble groups. Publ. Math. Debrecen,
+ 100(3-4):399–404, 2022.
673
+ [15] B. I. Plotkin. Radicals in groups, operations on group classes and radical classes. In Selected
674
+ questions of algebra and logic, pages 205–244. Nauka, Novosibirsk, 1973. In Russian.
675
+ [16] P. Schmid. ¨Uber die Automorphismengruppen endlicher Gruppen. Arch. Math., 23(1):236–
676
+ 242, 1972.
677
+ [17] L. A. Shemetkov. Formations of finite groups. Nauka, Moscow, 1978. In Russian.
678
+ [18] L. A. Shemetkov and A. N. Skiba. Formations of algebraic systems. Nauka, Moscow, 1989.
679
+ In Russian.
680
+ [19] D. A. Towers. The generalised nilradical of a Lie algebra. J. Algebra, 470:197–218, 2017.
681
+ [20] J. S. Wilson. On the structure of compact torsion groups. Monatsh. Math., 96(1):57–66,
682
+ 1983.
683
FNE0T4oBgHgl3EQfQwDu/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
HdFJT4oBgHgl3EQfFCyj/content/tmp_files/2301.11440v1.pdf.txt ADDED
@@ -0,0 +1,809 @@
1
+ Secure synchronization of artificial neural networks
2
+ used to correct errors in quantum cryptography
3
+ Marcin Niemiec∗, Tymoteusz Widlarz∗, Miralem Mehic†‡
4
+ ∗ AGH University of Science and Technology, al. Mickiewicza 30, 30-059 Krakow, Poland
5
+ † Department of Telecommunications, Faculty of Electrical Engineering, University of Sarajevo,
6
+ Zmaja od Bosne bb, 71000, Sarajevo, Bosnia and Herzegovina
7
+ ‡ VSB – Technical University of Ostrava, 17. listopadu 2172/15, 708 00 Ostrava, Czechia
8
+ ∗niemiec@agh.edu.pl †widlarztymoteusz@gmail.com ‡miralem.mehic@ieee.org
9
+ Abstract—Quantum cryptography can provide a very high
10
+ level of data security. However, a big challenge of this technique
11
+ is errors in quantum channels. Therefore, error correction
12
+ methods must be applied in real implementations. An example is
13
+ error correction based on artificial neural networks. This paper
14
+ considers the practical aspects of this recently proposed method
15
+ and analyzes elements which influence security and efficiency.
16
+ The synchronization process based on mutual learning processes
17
+ is analyzed in detail. The results allowed us to determine the
18
+ impact of various parameters. Additionally, the paper describes
19
+ the recommended number of iterations for different structures of
20
+ artificial neural networks and various error rates. All this aims
21
+ to support users in choosing a suitable configuration of neural
22
+ networks used to correct errors in a secure and efficient way.
23
+ Index Terms—quantum cryptography, key reconciliation, error
24
+ correction, artificial neural networks
25
+ I. INTRODUCTION
26
+ The emergence and intensive development of the field of
27
+ quantum computing has put many cryptography algorithms at
28
+ risk. However, quantum physics also allows to achieve multi-
29
+ ple cryptography tasks. One of the most popular is quantum
30
+ key distribution [1]. Unfortunately, quantum communication
31
+ is not perfect and additional solutions are required to correct
32
+ any errors after the key distribution in the quantum channel.
33
+ Artificial neural networks can be utilized to correct these errors
34
+ [2]. It is a recently proposed solution which provides high
35
+ level of security and efficiency compared to other existing
36
+ error correction methods.
37
+ This paper analyzes the impact of different neural networks’
38
+ parameters on the synchronization process. These parameters
39
+ influence the number of iterations required as well as the
40
+ security and efficiency of quantum cryptography. Therefore,
41
+ it is important to know which neural network scheme should
42
+ be chosen and which should be avoided. Additionally, the syn-
43
+ chronization requires the number of iterations to be specified.
44
+ Therefore, a recommended number of iterations for a particular
45
+ multiple neural network’s scheme is provided.
46
+ The paper is structured as follows. Related work is re-
47
+ viewed in Section 2. Section 3 presents the basics of quantum
48
+ cryptography, the architecture of the tree parity machine,
49
+ and error correction using this structure of artificial neural
50
+ networks. Analysis of synchronization parameters including
51
+ the recommended number of iterations for typical keys and
52
+ error rates is described in Section 4. Section 5 concludes the
53
+ paper.
54
+ II. RELATED WORK
55
+ The first quantum key distribution (QKD) protocol, intro-
56
+ duced in 1984 by Bennet and Brassard, is BB84 [3]. This
57
+ scheme uses the polarization state of a single photon to
58
+ transmit information. Since then, several other protocols have
59
+ been presented. One of them is the E91 protocol introduced
60
+ in 1991 by Ekert [4]. It utilizes entangled pairs of photons
61
+ in the QKD process. However, some errors usually appear
62
+ during data exchange in the quantum channel. After the initial
63
+ QKD, there is a specific step: quantum bit error rate (QBER)
64
+ estimation based on the acquired keys. The QBER value is
65
+ usually low [5]. It must be lower than the chosen threshold
66
+ used to detect the eavesdropper.
67
+ Several methods of correcting errors incurred in the quan-
68
+ tum key distribution process have been developed. The first
69
+ described method – BBBSS – was proposed in 1992 [6].
70
+ However, the most popular is the Cascade key reconciliation
71
+ protocol [7]. It is based on multiple random permutations.
72
+ The Winnow protocol, based on the exchange of parity and
73
+ Hamming codes, is another method of error correction in the
74
+ raw key [8]. Its main improvement is the reduction of the
75
+ required communication between both parties. The third most
76
+ popular error reconciliation scheme is the low density parity
77
+ check approach. It offers a significant reduction of exchanged
78
+ information; however, it introduces more computation and
79
+ memory costs than the Cascade and Winnow protocols [7].
80
+ In 2019, another method of error correction in quantum
81
+ cryptography was proposed by Niemiec in [2]. The solution
82
+ uses mutual synchronization of two artificial neural networks
83
+ (ANN) to correct the errors. The tree parity machine (TPM)
84
+ is proposed as a neural network used in this approach. It is
85
+ a well-known structure in cryptography – the synchronization
86
+ of two TPMs can be used as a key exchange protocol. TPMs
87
+ arXiv:2301.11440v1 [cs.CR] 26 Jan 2023
88
+
89
+ cannot be used as a general method to correct a selected error
90
+ because it is not possible to predict the final string of bits after
91
+ the synchronization process. However, it is a desirable feature
92
+ for shared keys which should be random strings of bits.
93
+ III. QUANTUM CRYPTOGRAPHY SUPPORTED BY
94
+ ARTIFICIAL NEURAL NETWORKS
95
+ Symmetric cryptography uses a single key to encrypt and
96
+ decrypt secret messages. Let’s assume that Alice and Bob, the
97
+ two characters used in describing cryptography protocols, are
98
+ using symmetric encryption. The goal is to send information
99
+ from Alice to Bob in a way that provides confidentiality. To
100
+ achieve this, Alice and Bob need to agree on a shared secret
101
+ key. Alice encrypts confidential data using the previously
102
+ chosen key and Bob decrypts it using the same key. The same
103
+ key is applied to encrypt and decrypt the information, hence
104
+ the name: symmetric-key encryption. It is worth mentioning
105
+ only the one-time-pad symmetric scheme has been proven
106
+ secure but it requires a key not smaller than the message being
107
+ sent.
108
+ In general, symmetric-key encryption algorithms – for ex-
109
+ ample the Advanced Encryption Standard (AES) [9] – per-
110
+ form better than asymmetric-key algorithms [10]. However,
111
+ symmetric-key algorithms have an important disadvantage
112
+ compared to asymmetric-key schemes. In the symmetric key
113
+ encryption scheme, the key needs to be safely distributed
114
+ or established between Alice and Bob [11]. The symmetric
115
+ key can be exchanged in a number of ways, including via
116
+ a trusted third party or by direct exchange between involved
117
+ parties. However, both methods introduce some vulnerabili-
118
+ ties, including passive scanning of network traffic. A method
119
+ where the eavesdropper can be easily detected uses quantum
120
+ mechanics to establish keys between Alice and Bob. It is called
121
+ the quantum key distribution protocol.
122
+ A. Quantum key distribution
123
+ Quantum mechanics allows for secure key distribution1
124
+ among network users. Two main principles are the core of
125
+ the security of QKD: an unknown quantum state cannot be
126
+ copied [12], and the quantum state cannot be estimated without
127
+ disturbing it. One of the most popular QKD protocols which
128
+ uses those principles is the BB84 scheme [3].
129
+ The BB84 protocol uses photons with two polarization
130
+ bases: rectilinear or diagonal. Alice encodes a string of bits
131
+ using photons on a randomly chosen basis. After that, all the
132
+ photons are sent through a quantum channel. Bob randomly
133
+ chooses a basis for each photon to decode the binary 0 or
134
+ 1. Alice and Bob’s bases are compared through a public
135
+ communication channel. Each bit where both parties chose the
136
+ same basis should be the same. However, when Bob measures
137
+ the photon in a different basis than Alice, this bit is rejected.
138
+ The remaining bits are the same for both parties and can be
139
+ considered as a symmetric key. Next, the error estimation
140
+ 1In fact, a key is not distributed but negotiated. However, the term
141
+ ’distribution’ is consistently used in this paper to be consistent with the
142
+ commonly accepted name of the technique.
143
+ is performed. Randomly chosen parts of the keys between
144
+ Alice and Bob are compared to compute the QBER value.
145
+ If the comparison results in a high error rate, it means that
146
+ the eavesdropper (Eve) is trying to gain information about
147
+ the exchanged photons. However, the quantum channel is not
148
+ perfect, and errors are usually detected due to disturbance,
149
+ noise in the detectors or other elements. The number of errors
150
+ introduced by the quantum channel’s imperfections must be
151
+ considered while deciding the maximum acceptable error rate.
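+ A minimal simulation sketch of the sifting and QBER-estimation steps described above (all
+ numerical values below are illustrative assumptions, not taken from the paper):
+ import numpy as np
+
+ rng = np.random.default_rng(1)
+ n = 4096                                     # number of transmitted photons (example value)
+ alice_bits  = rng.integers(0, 2, n)
+ alice_bases = rng.integers(0, 2, n)          # 0 = rectilinear, 1 = diagonal
+ bob_bases   = rng.integers(0, 2, n)
+
+ # Bob decodes correctly when bases match; otherwise his result is random.
+ bob_bits = np.where(bob_bases == alice_bases, alice_bits, rng.integers(0, 2, n))
+
+ # Channel imperfections flip a small fraction of the received bits (assumed 2%).
+ noise = rng.random(n) < 0.02
+ bob_bits = np.where(noise, 1 - bob_bits, bob_bits)
+
+ # Sifting: keep only the positions where both parties used the same basis.
+ keep = alice_bases == bob_bases
+ key_a, key_b = alice_bits[keep], bob_bits[keep]
+
+ # QBER estimation on a randomly chosen (and afterwards discarded) sample.
+ sample = rng.choice(len(key_a), size=len(key_a) // 4, replace=False)
+ qber = np.mean(key_a[sample] != key_b[sample])
+ print(f"sifted key length: {len(key_a)}, estimated QBER: {qber:.3f}")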
152
+ The differences between Alice and Bob’s keys need to
153
+ be corrected. Several error correction methods are known.
154
+ BBBSS is the earliest scheme proposed in [6]. It is mainly
155
+ based on parity checks. The most popular method is the
156
+ Cascade protocol [13]. It is an improved version of BBBSS
157
+ and requires less information to be sent between Alice and
158
+ Bob through the public channel. The Cascade protocol and
159
+ its predecessor are based on multiple parity checks. The basic
160
+ idea is that the keys are divided into blocks of a fixed size.
161
+ The number of bits in each block depends on the previously
162
+ calculated QBER value. Alice and Bob compare the parities
163
+ of each block to allow them to find an odd number of errors.
164
+ If errors are detected in a given block, it is split into two.
165
+ The process is repeated recursively for each block until all
166
+ errors are corrected. It concludes a single iteration after which
167
+ Alice and Bob have keys with an even number of errors or
168
+ without any errors. Before performing the following iterations,
169
+ the keys are scrambled, and the size of the block is increased.
170
+ The number of iterations is predetermined. As a result of this
171
+ process, Alice and Bob should have the same keys. However,
172
+ it is not always the case. A number of iterations or block sizes
173
+ can be chosen incorrectly and cause failure in error correction.
174
+ Additionally, the algorithm performs multiple parity checks
175
+ over the public channel, which can be intercepted by an
176
+ eavesdropper (Eve). As a result, Eve can construct a partial
177
+ key. Alice and Bob should discard parts of their keys to
178
+ compensate for the lost security. This reduces the performance of
179
+ this method since the confidential keys must be shortened in
180
+ the process. Another error reconciliation method is based on
181
+ mutual synchronization of artificial neural networks.
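+ A sketch of the parity-compare-and-bisect step that Cascade repeats (illustrative only;
+ block-size selection, key permutations and the public-channel message exchange are omitted):
+ def parity(bits):
+     return sum(bits) % 2
+
+ def find_error(alice_block, bob_block):
+     # Binary search for one error position, assuming the block parities differ
+     # (odd number of errors, as described in the text).
+     lo, hi = 0, len(alice_block)
+     while hi - lo > 1:
+         mid = (lo + hi) // 2
+         # In the real protocol Bob asks Alice for this sub-block parity publicly;
+         # here both halves are simply available locally.
+         if parity(alice_block[lo:mid]) != parity(bob_block[lo:mid]):
+             hi = mid
+         else:
+             lo = mid
+     return lo                                 # index of a bit to flip in Bob's block
+
+ def cascade_pass(alice_key, bob_key, block_size):
+     # One Cascade iteration: fix one error in every block with odd error count.
+     bob_key = list(bob_key)
+     for start in range(0, len(alice_key), block_size):
+         a = alice_key[start:start + block_size]
+         b = bob_key[start:start + block_size]
+         if parity(a) != parity(b):
+             i = find_error(a, b)
+             bob_key[start + i] ^= 1
+     return bob_key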
182
+ B. Tree parity machine
183
+ An artificial neural network (ANN) is a computing system
184
+ inspired by biological neural networks [14]. ANNs are used
185
+ to recognize patterns and in many other solutions in the fields
186
+ of machine learning. ANNs consist of multiple connected
187
+ nodes (artificial neurons), with each neuron representing a
188
+ mathematical function [15]. These nodes are divided into three
189
+ types of layers: the first (input) layer, at least one hidden layer,
190
+ and the output layer. The connections between neurons in each
191
+ layer can be characterized by weights.
192
+ In cryptography, the most commonly used neural network is
193
+ the tree parity machine (TPM) [16]. A scheme of this model
194
+ is presented in Fig. 1. There are K ×N input neurons, divided
195
+ into K groups. There is a single hidden layer with K nodes.
196
+ Each of these nodes has N inputs. The TPM has a single
197
+ output neuron. The connections between input neurons and
198
+
199
+ hidden layer neurons are described by weights W – integers
200
+ in the range [−L, L], thus L is the maximum and −L is
201
+ the minimum weight value. The values of σ characterize the
202
+ connections between the hidden layer neurons and an output
203
+ neuron. The output value of the TPM is described by τ.
204
+ The value of σ is calculated using the following formulas:
205
+ σk = sgn( Σ_{n=1}^{N} xkn ∗ wkn )    (1)
211
+ sgn(z) = −1 if z ≤ 0, and 1 if z > 0    (2)
218
+ Due to the usage of the presented signum function, σ can take
219
+ two values: 1 or −1. The output value of TPM is calculated
220
+ as:
221
+ τ = ∏_{k=1}^{K} σk    (3)
227
+ This neural network has two possible outcomes: 1 or −1.
228
+ For the TPM structure, multiple learning algorithms are
229
+ proposed. Most popular are Hebbian, anti-Hebbian, and ran-
230
+ dom walk. The leading is the Hebbian rule [17]. The Hebbian
231
+ algorithm updates ANN weights in the following manner:
232
+ w∗kn = vL(wkn + xkn ∗ σk ∗ θ(σk, τ))    (4)
235
+ where θ limits the impact of hidden layer neurons whose value
236
+ was different than τ:
237
+ θ(σk, τ) = 0 if σk ̸= τ, and 1 if σk = τ    (5)
244
+ The vL function makes sure that the new weights are kept
245
+ within the [−L, L] range:
246
+ vL(z) = −L if z ≤ −L; z if −L < z < L; L if z ≥ L    (6)
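+ A compact NumPy sketch of equations (1)–(6) follows (an illustration only: the class and
+ method names are assumptions, not the authors' implementation; sgn(0) = −1 as in eq. (2)):
+ import numpy as np
+
+ class TreeParityMachine:
+     def __init__(self, K, N, L, weights=None, seed=None):
+         self.K, self.N, self.L = K, N, L
+         rng = np.random.default_rng(seed)
+         # Weights are integers in [-L, L]; they may also be set from key bits.
+         self.w = (np.array(weights, dtype=int).reshape(K, N)
+                   if weights is not None
+                   else rng.integers(-L, L + 1, size=(K, N)))
+
+     def output(self, x):
+         # x has shape (K, N) with entries +/-1; returns (sigma, tau), eqs. (1)-(3).
+         local_fields = np.sum(x * self.w, axis=1)
+         sigma = np.where(local_fields > 0, 1, -1)      # eq. (2), sgn(0) = -1
+         tau = int(np.prod(sigma))                      # eq. (3)
+         self.sigma = sigma
+         return sigma, tau
+
+     def hebbian_update(self, x, tau_own, tau_other):
+         # Hebbian rule, eqs. (4)-(6): weights change only when both outputs agree.
+         if tau_own != tau_other:
+             return
+         theta = (self.sigma == tau_own).astype(int)    # eq. (5)
+         self.w = self.w + x * self.sigma[:, None] * theta[:, None]
+         self.w = np.clip(self.w, -self.L, self.L)      # v_L clamp, eq. (6)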
259
+ The TPM structure allows for mutual learning of the two
260
+ neural networks [18], primarily based on updating weights
261
+ only when the outputs from both neural networks are the same.
262
+ The input values are random and the same for both Alice and
263
+ Bob’s TPMs. Inputs are updated in each iteration. The security
264
+ of this process relies on the fact that cooperating TPMs can
265
+ achieve convergence significantly faster than Eve’s machine,
266
+ which can update weights less frequently. The TPM is most
267
+ commonly used in cryptography to exchange a secret key. This
268
+ usage is defined as neural cryptography [19]. Alice and Bob
269
+ mutually synchronize their TPMs to achieve the same weights.
270
+ After the synchronization process, these weights provide a
271
+ secure symmetric key.
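+ A sketch of this mutual-learning loop, reusing the TreeParityMachine class from the sketch
+ above (the stopping rule and the iteration budget are assumptions):
+ import numpy as np
+
+ def synchronize(tpm_alice, tpm_bob, max_iterations=500, seed=None):
+     # Run mutual learning until the weights agree or the budget is exhausted.
+     rng = np.random.default_rng(seed)
+     for iteration in range(1, max_iterations + 1):
+         # Alice draws a common random input and both sides evaluate it.
+         x = rng.choice([-1, 1], size=(tpm_alice.K, tpm_alice.N))
+         _, tau_a = tpm_alice.output(x)
+         _, tau_b = tpm_bob.output(x)
+         if tau_a == tau_b:                      # update only on agreement
+             tpm_alice.hebbian_update(x, tau_a, tau_b)
+             tpm_bob.hebbian_update(x, tau_b, tau_a)
+         if np.array_equal(tpm_alice.w, tpm_bob.w):
+             return iteration                    # synchronized
+     return None                                 # not synchronized within the budget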
272
+ C. Error correction based on TPMs
273
+ TPMs can be utilized during the error correction process
274
+ in quantum cryptography [2]. The neural network’s task is to
275
+ correct all errors to achieve the same string of confidential bits
276
+ at both endpoints. Firstly, Alice and Bob prepare their TPMs.
277
+ The number of neurons in the hidden layer (K) and the number
278
+ of input neurons (N) is determined by Alice and passed on
279
+ to Bob. The value L must also be agreed between the users.
280
+ The keys achieved using the QKD protocol are changed into
281
+ integer values in the range [−L, L]. These values are used
282
+ in the appropriate TPMs as weights between neurons in the
283
+ input layer and the hidden layer. Since Alice’s string of bits
284
+ is similar to Bob’s (QBER is usually not high), the weights
285
+ in the created TPMs are almost synchronized. At this point,
286
+ Alice and Bob have constructed TPMs with the same structure
287
+ but with a few differences in the weight values.
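+ One possible bit-to-weight mapping consistent with this description (the exact encoding is not
+ specified above, so the grouping used here is an assumption): each weight takes one of 2L + 1
+ values, so about log2(2L + 1) key bits are mapped to one integer in [−L, L].
+ import math
+
+ def bits_to_weights(key_bits, L):
+     # Group key bits and map each group to an integer weight in [-L, L].
+     b = math.ceil(math.log2(2 * L + 1))          # bits per weight (assumed encoding)
+     weights = []
+     for i in range(0, len(key_bits) - b + 1, b):
+         value = int("".join(map(str, key_bits[i:i + b])), 2)
+         weights.append(value % (2 * L + 1) - L)  # fold into [-L, L]
+     return weights
+
+ def weights_to_bits(weights, L):
+     # After synchronization both parties hold identical weights, so this mapping
+     # yields identical bit strings on both sides (it need not invert the one above).
+     b = math.ceil(math.log2(2 * L + 1))
+     bits = []
+     for w in weights:
+         bits.extend(int(c) for c in format(w + L, f"0{b}b"))
+     return bits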
288
+ After establishing the TPM structure and changing bits to
289
+ weights, the synchronization process starts. It consists of mul-
290
+ tiple iterations, repeated until common weights are achieved
291
+ between Alice and Bob. A single iteration starts from Alice
292
+ choosing the input string and computing the result using the
293
+ TPM. After that, the generated input string is passed on to Bob,
294
+ who computes the output of his TPM using the received input.
295
+ Then, the results are compared. If the outputs of both TPMs
296
+ match, the weights can be updated. Otherwise, the process is
297
+ repeated with a different input string.
298
+ After an appropriate number of iterations, the TPMs are
299
+ synchronized and Alice and Bob can change the weights back
300
+ into a string of bits. The resulting bits are the same. However,
301
+ the privacy amplification process after error correction is still
302
+ recommended [20]. The reduction of the key protecting Alice
303
+ and Bob from information leakage is defined as [2]:
304
+ Z = log2L+12i
305
+ (7)
306
+ where i is the number of TPM iterations.
307
+ This usage of TPM is safer than the neural cryptography
308
+ solution, because weights are similar before the synchroniza-
309
+ tion. Therefore, significantly fewer iterations are required to
310
+ achieve convergence than the randomly initialized weights
311
+ in key establishing algorithms. It is worth mentioning this
312
+ method of error correction is characterized by high efficiency,
313
+ e.g. requires approximately 30% less iterations than Cascade
314
+ algorithm [2].
315
+ IV. ANALYSIS OF THE SYNCHRONIZATION PROCESS
316
+ The crucial decision regarding the error detection approach
317
+ based on TPMs is the number of iterations during the syn-
318
+ chronization process. This value should be as low as possible
319
+ for security reasons. However, it cannot be too low, since
320
+ neural networks will not be able to correct all errors in the
321
+ key otherwise. It is the user’s responsibility to select the
322
+ appropriate value for the error correction. The main objective
323
+ of the analysis is to determine the impact of various neural
324
+ network parameters on the synchronization process. Another
325
+ goal is to provide a recommended number of iterations for
326
+ users.
327
+
328
+ [Fig. 1 layout: input nodes X11 . . . XKN, weights W11 . . . WKN ∈ {−L, . . . , L}, hidden-layer
+ values σ1 . . . σK ∈ {−1, 1}, and a single output τ ∈ {−1, 1}.]
354
+ Fig. 1. Model of tree parity machine.
355
+ A. Testbed
356
+ The experiments require an application to simulate the error
357
+ correction process based on artificial neural networks. The
358
+ application for correcting errors arising in quantum key distri-
359
+ bution was written in Python and uses the NumPy package – a
360
+ library for scientific computing which provides fast operations
361
+ on arrays required by the TPM. The functions provided by
362
+ NumPy satisfy all necessary calculations to achieve neural
363
+ network convergence. Synchronization of TPMs is performed
364
+ over sockets to allow real-world usage of this tool. The
365
+ Hebbian learning algorithm for updating weights is used.
366
+ The developed application makes it possible to correct errors
367
+ in the keys using quantum key distribution protocols. The users
368
+ are also able to correct simulated keys with the chosen error
369
+ rate. It helps if users do not have strings of bits created by a
370
+ real QKD system. An important feature of the tool is its ability
371
+ to select neural network parameters. The user can personalize
372
+ the synchronization process, starting from the key length and
373
+ error rate. The least sufficient number of bits was used for
374
+ translation into a single integer (values of the weights must be
375
+ in the range [−L, L]). It was demonstrated that the number of
376
+ hidden neurons and the number of inputs depend on the chosen
377
+ key length and L value. Therefore, users need to select these
378
+ parameters taking into account the requirements and needs.
379
+ During the experiments the minimum number of returned
380
+ required iterations for a single TPM configuration was set
381
+ to 200. The maximum number of iterations was limited to
382
+ 1000. Additionally, the maximum number of retries in a single
383
+ iteration was limited to 10 to speed up the simulation process.
384
+ Finally, 1880 different scenarios were analyzed. All possible
385
+ TPM configurations for key lengths varying between 100 and
386
+ 700 with a 100 bit step are available. Moreover, the data is
387
+ available for other keys with lengths varying between 128 and
388
+ 352 with an 8 bit step. Between 350 and 500 synchronizations
389
+ were performed for each TPM. It was assumed that this
390
+ number of iterations is sufficient to achieve convergence.
391
+ B. Recommended number of iterations
392
+ To obtain the recommended number of iterations of TPMs
393
+ for successful error correction, the sum of means and standard
394
+ deviations of the results was calculated. The median and
395
+ variance values were calculated as well for comparison. The
396
+ full results are available online2. The selected part – the neural
397
+ network configurations where the key length equals 256 bits
398
+ with the recommended number of iterations – is presented in
399
+ Tab. I.
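+ The recommendation rule described here (mean plus one standard deviation) can be computed
+ directly; a minimal sketch with made-up sample data:
+ import numpy as np
+
+ # Synchronization lengths collected for one TPM configuration (example data only).
+ iterations = np.array([140, 188, 203, 215, 240, 262, 301, 355, 410, 512])
+
+ recommended = int(np.ceil(iterations.mean() + iterations.std()))
+ percentile_85 = int(np.percentile(iterations, 85))
+ print(f"recommended: {recommended}, 85th percentile: {percentile_85}")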
400
+ Fig. 2. Histogram for number of iterations (TPM with a 256 bit key, N = 16,
401
+ K = 4, L = 4, QBER = 3%).
402
+ 2Recommended numbers of iterations for 1880 different scenarios –
403
+ TPM structures and QBER values – are available from: http://kt.agh.edu.pl/
404
+ ∼niemiec/ICC-2023 This is mainly based on possible key lengths which vary
405
+ between 128 and 500 bits with 4 bit steps. Additionally, keys with lengths
406
+ between 500 and 700 with 100 bit steps are included.
407
+
408
+ [Figure 2: histogram of the number of iterations; x-axis: number of iterations in bins from
+ [11, 69] up to > 400, y-axis: count (up to about 180).]
+ TABLE I
428
+ RECOMMENDED NUMBER OF ITERATIONS FOR TPMS GENERATED FOR
429
+ 256 BIT KEYS
430
+ Weights range {−L, L} | QBER [%] | Number of inputs to a single hidden neuron [N] | Number of hidden neurons [K] | Recommended number of iterations
+ 2 | 1 |  2 | 43 | 154
+ 2 | 1 | 43 |  2 |  51
+ 2 | 2 |  2 | 43 | 179
+ 2 | 2 | 43 |  2 |  59
+ 2 | 2 | 86 |  1 |  24
+ 2 | 3 |  2 | 43 | 188
+ 2 | 3 | 43 |  2 |  64
+ 2 | 3 | 86 |  1 |  25
+ 3 | 1 |  2 | 43 | 218
+ 3 | 1 | 43 |  2 |  71
+ 3 | 1 | 86 |  1 |  33
+ 3 | 2 |  2 | 43 | 309
+ 3 | 2 | 43 |  2 |  94
+ 3 | 2 | 86 |  1 |  39
+ 3 | 3 |  2 | 43 | 325
+ 3 | 3 | 43 |  2 |  97
+ 3 | 3 | 86 |  1 |  40
+ 4 | 1 |  2 | 32 | 450
+ 4 | 1 |  4 | 16 | 496
+ 4 | 1 |  8 |  8 | 301
+ 4 | 1 | 16 |  4 | 176
+ 4 | 1 | 32 |  2 | 125
+ 4 | 2 |  2 | 32 | 554
+ 4 | 2 |  4 | 16 | 701
+ 4 | 2 |  8 |  8 | 483
+ 4 | 2 | 16 |  4 | 264
+ 4 | 2 | 32 |  2 | 152
+ 4 | 3 |  2 | 32 | 609
+ 4 | 3 |  4 | 16 | 772
+ 4 | 3 |  8 |  8 | 542
+ 4 | 3 | 16 |  4 | 302
+ 4 | 3 | 32 |  2 | 164
611
+ Fig. 2 shows the histogram of data gathered for a sin-
612
+ gle neural network configuration. The distribution is right-
613
+ skewed. The mean value is greater than the median. It is a
614
+ common characteristic for other tested TPM configurations. If
615
+ the distribution is not positively skewed, it is symmetrical.
616
+ The recommended number of iterations for the presented
617
+ configuration, according to Tab. I, equals 302. It is based on
618
+ the sum of the mean and standard deviation values. For all
619
+ presented TPM configurations, this sum gives an 84% chance
620
+ of successful synchronization, assuming a normal distribution
621
+ of results. For the right-skewed distribution, similar to the one
622
+ presented in Fig. 2, the probability of success is higher. The
623
+ 85-th percentile for the given set is equal to 276 – less than
624
+ the proposed value. In this case, after choosing the suggested
625
+ number of iterations the user has more than an 88% chance
626
+ of success.
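+ The rule described above (recommended value equals the mean plus one standard
+ deviation) can be illustrated with a minimal Python sketch. This is not the
+ authors' simulator, and the run lengths below are made-up illustrative values:
+ import statistics
+ # iteration counts observed in repeated TPM synchronization runs (illustrative values)
+ runs = [120, 154, 176, 203, 231, 258, 276, 301, 344, 402]
+ mean = statistics.mean(runs)
+ std = statistics.pstdev(runs)            # standard deviation of the gathered results
+ recommended = round(mean + std)          # the sum used as the recommended number of iterations
+ # empirical chance that a run finishes within the recommended budget
+ success = sum(r <= recommended for r in runs) / len(runs)
+ print(recommended, success)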
627
+ Knowing the lowest required number of iterations is im-
628
+ portant because it reduces the risk of a successful attack by
629
+ Eve. The attacker could create independent TPMs and try
630
+ to synchronize one of them with Alice or Bob’s machine.
631
+ The recommended number of iterations increases the security
632
+ of this solution because Alice and Bob require far fewer
633
+ iterations to synchronize, compared to Alice (or Bob) and Eve
634
+ synchronizing using random weights.
635
+ C. Impact of TPM structures
636
+ The results of simulations allow us to analyze how TPM
637
+ structures affect the number of required iterations during the
638
+ synchronization process. Fig. 3 shows the number of required
639
+ iterations depending on the K and N parameters. It shows
640
+ two different TPM configurations: one with a 144 bit key and
641
+ another with a 216 bit key. These configurations were chosen
642
+ due to having a similar number of possible K and N pairs.
643
+ For a given key length, L value and error rate there is a limited
644
+ number of possible N and K values. The K value changes
645
+ in inverse proportion to the N value. As presented in Fig.
646
+ 3 the speed of the TPM synchronization process depends on
647
+ the neural network structure (N and K values). The number
648
+ of required iterations increases alongside the higher number
649
+ of neurons in the hidden layer (K). The trend is similar for
650
+ both presented TPMs. After achieving a certain threshold,
651
+ the number of recommended iterations increases slowly. The
652
+ results fit the logarithmic trend line. It means that after a
653
+ certain K value, increasing this parameter further does not
654
+ affect the synchronization speed as much as under a certain
655
+ threshold.
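+ The logarithmic trend can be reproduced with a short, illustrative fit (not part
+ of the original study; numpy is assumed to be available). The paper's Fig. 3 uses
+ 144 and 216 bit keys; here the same kind of fit is applied, for illustration, to
+ the L = 4, QBER = 3% rows of Tab. I:
+ import numpy as np
+ K = np.array([2, 4, 8, 16, 32])               # hidden layer neurons (Tab. I, L = 4, QBER = 3%)
+ iters = np.array([164, 302, 542, 772, 609])   # recommended iterations for those rows
+ b, a = np.polyfit(np.log(K), iters, 1)        # least-squares fit of a + b*ln(K)
+ print(f"iterations ~ {a:.0f} + {b:.0f} * ln(K)")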
656
+ Fig. 3. Number of iterations for TPMs with 144 and 216 bit keys for different
657
+ K value.
658
+ Other configurations of the selected TPMs were studied
659
+ based on the increasing error rate of the keys. Two configura-
660
+ tions with 128 and 256 bit keys were tested. The recommended number of
+ iterations was averaged over every possible configuration for different
+ QBER values. The results
663
+ are presented in Fig. 4. This confirms that a greater number
664
+ of errors results in a higher average number of recommended
665
+ iterations. It confirms the applicability of TPMs to correct
666
+ errors emerging in quantum key distribution, where the error
667
+ rate should not be higher than a few percent. Therefore, the
668
+ eavesdropper needs more iterations to synchronize its TPM.
669
+ Additionally, it was verified that value L has an exponential
670
+ impact on the average recommended number of iterations. The
671
+ data was gathered using a similar approach to the study with
672
+ [Fig. 3 plot data: recommended number of iterations (0–180) versus the number of hidden layer neurons K (6–36) for 144 bit and 216 bit keys.]
+ Fig. 4. Number of iterations for TPMs with 128 and 256 bit keys dependent
+ on the QBER.
693
+ the impact of QBER. The average recommended number of
694
+ iterations of each configuration for a given L was calculated.
695
+ Fig. 5 shows the exponential trend line. It is worth mentioning
696
+ that the impact of L value on the synchronization time is
697
+ significant.
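+ A similarly illustrative sketch of the exponential trend (again assuming numpy,
+ and using only the QBER = 3%, N = 2 rows of Tab. I for L = 2, 3, 4; not the
+ authors' fit):
+ import numpy as np
+ L_vals = np.array([2, 3, 4])
+ iters = np.array([188, 325, 609])                  # Tab. I rows for QBER = 3%, N = 2
+ b, log_a = np.polyfit(L_vals, np.log(iters), 1)    # log-linear fit of a*exp(b*L)
+ print(f"iterations ~ {np.exp(log_a):.0f} * exp({b:.2f} * L)")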
698
+ Fig. 5.
699
+ Number of iterations for TPMs with 128 and 256 bit keys dependent
700
+ on the L value.
701
+ It is the user’s responsibility to choose the best possible
702
+ configuration for a given key length and QBER value. The
703
+ analysis shows that the L value should be chosen carefully
704
+ since it exponentially affects the required number of iterations.
705
+ Additionally, the choice of the K value should be made
706
+ with caution due to its logarithmic impact on the number of
707
+ iterations.
708
+ V. SUMMARY
709
+ The analysis of the TPM synchronization process used for
710
+ error correction purposes was presented in this paper. It shows
711
+ that the parameters of the TPM structure have an impact on
712
+ the synchronization time and security of this error correction
713
+ method. However, different parameters of artificial neural
714
+ networks have different effects. Therefore, users should be
715
+ aware of how to choose the configuration of neural networks
716
+ used to correct errors in a secure and efficient way. One of
717
+ the deciding factors which need to be selected is the number
718
+ of iterations. The paper describes the recommended number
719
+ of iterations for different TPM structures and QBER values
720
+ to assist users in this step. The numbers recommended by the
721
+ authors are as low as possible but with a high probability of
722
+ successful synchronization to ensure secure and efficient error
723
+ correction based on artificial neural networks.
724
+ ACKNOWLEDGMENT
725
+ This work was supported by the ECHO project which has
726
+ received funding from the European Union’s Horizon 2020
727
+ research and innovation programme under the grant agreement
728
+ no. 830943.
729
+ REFERENCES
730
+ [1] S. Abidin, A. Swami, E. Ramirez-As´ıs, J. Alvarado-Tolentino, R. K.
731
+ Maurya, and N. Hussain, “Quantum cryptography technique: A way
732
+ to improve security challenges in mobile cloud computing (mcc),”
733
+ Materials Today: Proceedings, vol. 51, pp. 508–514, 2022.
734
+ [2] M. Niemiec, “Error correction in quantum cryptography based on
735
+ artificial neural networks,” Quantum Information Processing, 2019.
736
+ [3] C. Bennett and G. Brassard, “Quantum cryptography: Public key dis-
737
+ tribution and coin tossing,” Theoretical Computer Science - TCS, pp.
738
+ 175–179, 1984.
739
+ [4] A. Ekert, “Quantum cryptography based on Bell’s theorem,” Phys. Rev.
740
+ Lett., pp. 661–663, 1991.
741
+ [5] M. Khodr, “Evaluations of quantum bit error rate using the three stage
742
+ multiphoton protocol,” 2017 International Conference on Electrical and
743
+ Computing Technologies and Applications (ICECTA), pp. 1–4, 2017.
744
+ [6] C. Bennett, F. Bessette, G. Brassard, L. Salvail, and J. Smolin, “Exper-
745
+ imental quantum cryptography,” Journal of Cryptology, pp. 3–28, 1992.
746
+ [7] M. Mehic, M. Niemiec, H. Siljak, and M. Voznak, “Error reconcilia-
747
+ tion in quantum key distribution protocols,” Reversible Computation:
748
+ Extending Horizons of Computing: Selected Results of the COST Action
749
+ IC1405, pp. 222–236, 2020.
750
+ [8] W. T. Buttler, S. K. Lamoreaux, J. R. Torgerson, G. H. Nickel, C. H.
751
+ Donahue, and C. G. Peterson, “Fast, efficient error reconciliation for
752
+ quantum cryptography,” Phys. Rev. A, 2003.
753
+ [9] H. Delfs and H. Knebl, “Symmetric-key encryption,” Introduction to
754
+ Cryptography: Principles and Applications, pp. 11–31, 2007.
755
+ [10] M. Panda, “Performance analysis of encryption algorithms for security,”
756
+ 2016 International Conference on Signal Processing, Communication,
757
+ Power and Embedded System (SCOPES), pp. 278–284, 2016.
758
+ [11] M. Umaparvathi and D. K. Varughese, “Evaluation of symmetric en-
759
+ cryption algorithms for manets,” 2010 IEEE International Conference
760
+ on Computational Intelligence and Computing Research, pp. 1–3, 2010.
761
+ [12] W. K. Wootters and W. H. Zurek, “A single quantum cannot be cloned,”
762
+ Nature, pp. 802–803, 1982.
763
+ [13] G. Brassard and L. Salvail, “Secret-key reconciliation by public discus-
764
+ sion,” Advances in Cryptology, pp. 410–423, 1994.
765
+ [14] J. Hopfield, “Artificial neural networks,” IEEE Circuits and Devices
766
+ Magazine, pp. 3–10, 1988.
767
+ [15] P. P. Hadke and S. G. Kale, “Use of neural networks in cryptography:
768
+ A review,” in 2016 World Conference on Futuristic Trends in Research
769
+ and Innovation for Social Welfare (Startup Conclave), 2016, pp. 1–4.
770
+ [16] A. Sarkar, “Secure exchange of information using artificial intelligence
771
+ and chaotic system guided neural synchronization,” Multimedia Tools
772
+ and Applications, vol. 80, pp. 1–31, 05 2021.
773
+ [17] M. Aleksandrov and Y. Bashkov, “Factors affecting synchronization time
774
+ of tree parity machines in cryptography,” 2020 IEEE 2nd International
775
+ Conference on Advanced Trends in Information Theory (ATIT), pp. 108–
776
+ 112, 2020.
777
+ [18] R. Metzler, W. Kinzel, and I. Kanter, “Interacting neural networks,”
778
+ Phys. Rev. E, pp. 2555–2565, 2000.
779
+ [19] W. Kinzel and I. Kanter, “Neural cryptography,” Proceedings of the 9th
780
+ International Conference on Neural Information Processing, pp. 1351–
781
+ 1354, 2002.
782
+ [20] C. Bennett, G. Brassard, and J. Robert, “Privacy amplification by public
783
+ discussion,” SIAM J. Comput., p. 210–229, 1988.
784
+ [Fig. 4 plot data: average recommended number of iterations (0–300) versus QBER [%] for 128 bit and 256 bit keys. Fig. 5 plot data: average recommended number of iterations (0–400) versus the L value (2–4) for 128 bit and 256 bit keys.]
HdFJT4oBgHgl3EQfFCyj/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
MtFJT4oBgHgl3EQfzS0r/content/2301.11642v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9b6e90f0ee6cae3b5f4c76db62894a3e01a673b792d933d9b8dc274f1fb769cd
3
+ size 573711
NtFQT4oBgHgl3EQfWjbh/content/tmp_files/2301.13305v1.pdf.txt ADDED
@@ -0,0 +1,371 @@
1
+ arXiv:2301.13305v1 [math.CO] 30 Jan 2023
2
+ Graph-Codes
3
+ Noga Alon ∗
4
+ Abstract
5
+ The symmetric difference of two graphs G1, G2 on the same set of vertices [n] =
6
+ {1, 2, . . . , n} is the graph on [n] whose set of edges are all edges that belong to exactly
7
+ one of the two graphs G1, G2. Let H be a fixed graph with an even (positive) number
8
+ of edges, and let DH(n) denote the maximum possible cardinality of a family of graphs
9
+ on [n] containing no two members whose symmetric difference is a copy of H. Is it
10
+ true that DH(n) = o(2^{\binom{n}{2}}) for any such H? We discuss this problem, compute the
12
+ value of DH(n) up to a constant factor for stars and matchings, and discuss several
13
+ variants of the problem including ones that have been considered in earlier work.
14
+ 1 Introduction
+ 1.1 The problem
18
+ The symmetric difference of two graphs G1 = (V, E1) and G2 = (V, E2) on the same set of
19
+ vertices V is the graph (V, E1 ⊕ E2) where E1 ⊕ E2 is the symmetric difference between
20
+ E1 and E2, that is, the set of all edges that belong to exactly one of the two graphs. Put
21
+ V = [n] = {1, 2, . . . , n} and let H be a family of graphs on the set of vertices [n] which is
22
+ closed under isomorphism. A collection of graphs F on [n] is called an H-(graph)-code if
23
+ it contains no two members whose symmetric difference is a graph in H. For the special
24
+ case that H contains all copies of a single graph H on [n] this is called an H-code. Here
25
+ we are interested in the maximum possible cardinality of such codes for various families
26
+ H. Let DH(n) denote this maximum, and let
27
+ dH(n) = DH(n) / 2^{\binom{n}{2}}
30
+ denote the maximum possible fraction of the total number of graphs on [n] in an H-code.
31
+ If H consists of all graphs isomorphic to one graph H, we denote dH(n) by dH(n). Note
32
+ that if H consists of all graphs with less than d edges, then DH(n) is simply the maximum
33
+ ∗Princeton University, Princeton, NJ, USA and Tel Aviv University, Tel Aviv, Israel.
34
+ Email:
35
+ nalon@math.princeton.edu. Research supported in part by NSF grant DMS-2154082 and by USA-Israel
36
+ BSF grant 2018267.
37
+ 1
38
+
39
+ possible cardinality of a binary code of length \binom{n}{2} and minimum distance at least d. This
44
+ motivates the terminology “graph-codes” used here.
45
+ The case H = K where K is the family of all cliques is of particular interest. This case
46
+ is motivated by a conjecture of Gowers raised in his blog post [8] in 2009 and is discussed
47
+ briefly in the comments of that blog. If H consists of all graphs with independence number
48
+ at most 2, then dH(n) ≥ 1/8 for all n ≥ 3, as shown by the family of all graphs on [n]
49
+ containing a triangle on the set of vertices {1, 2, 3}. An interesting result of Ellis, Filmus
50
+ and Friedgut [5], settling a conjecture of Simonovits and S´os, asserts that this is tight
51
+ for all n ≥ 3. The corresponding result, that dH′(n) = 1/26 for all n ≥ 4, where H′ is
52
+ the family of all graphs with independence number at most 3, is proved in [3]. A more
53
+ systematic study of the parameters DH(n) and dH(n) for various families of graphs H
54
+ appears in the recent paper [1]. The families H considered in this work include the family
55
+ of all disconnected graphs, the family of all graphs that are not 2-connected, the family
56
+ of all non-Hamiltonian graphs and the family of all graphs that contain or do not contain
57
+ a spanning star. Additional families studied are all graphs that contain an induced or
58
+ non-induced copy of a fixed graph T, or all graphs that do not contain such a subgraph.
59
+ In this note we focus on the case that H consists of a single graph H and the case that
60
+ H is the family of all cliques, or all cliques up to a prescribed size. Note that trivially,
61
+ if every member of H has an odd number of edges then dH(n) ≥ 1/2, as the family of all
63
+ graphs on [n] with an even number of edges forms an H-code.
64
+ This suggests the following intriguing question.
65
+ Question 1.1. Let H be a family of graphs closed under isomorphism. Is it true that
66
+ dH(n) tends to 0 as n tends to infinity if and only if H contains a graph with an even
67
+ number of edges ? Equivalently: is it true that for any fixed graph H with an even number
68
+ of edges, dH(n) tends to 0 as n tends to infinity ?
69
+ We also study the linear variant of these problems, where the H-codes considered are
70
+ restricted to linear subspaces, that is, to families of graphs on [n] closed under symmetric
71
+ difference.
72
+ 1.2 Results
74
+ Recall that K is the family of all cliques. Let K(r) denote the set of all cliques on at most
75
+ r vertices. Let K1,t denote the star with t edges and let Mt denote the matching of t edges.
76
+ Theorem 1.2. For every positive integer k,
77
+ d_{K_{1,2k}}(n) = Θ_k(1/n^k) and d_{M_{2k}}(n) = Θ_k(1/n^k).
79
+ Proposition 1.3. For every integer r ≥ 1,
80
+ d_{K(4r+3)}(n) ≥ Ω(1/n^r).
82
+ 2
83
+
84
+ Proposition 1.4. For the family K of all cliques, d_K(n) ≥ 1/2^{[n/2]}.
87
+ Proposition 1.5. Let H be a fixed graph obtained from two copies of a graph H′ by
88
+ identifying the vertices of an independent set of H′. Then
89
+ d_H(n) ≤ |V(H)|/n for all n ≥ |V(H)|.
92
+ In particular, dH(n) tends to 0 as n tends to infinity.
93
+ Remark:
94
+ all lower bounds are proved by exhibiting proper colorings of the relevant
95
+ Cayley graphs, and in all cases the constructed family is an affine space over Z2. Using
96
+ a simple Ramsey-theoretic argument it is not difficult to show that for an affine space
97
+ the maximum possible cardinality obtained is at most a fraction O(log log n/ log n) of all
98
+ graphs on n vertices whenever the defining family contains a fixed graph with an even
99
+ number of edges.
100
+ Since all lower bounds are obtained by what may be called linear graph-codes one can
101
+ study this separately, as done for standard error correcting codes. For the family of all
102
+ cliques K we get here an exact result (strengthening the assertion of Proposition 1.4).
103
+ Theorem 1.6. For any n ≥ 2, the minimum possible co-dimension of a linear space of
104
+ graphs on n vertices that contains no member of K is exactly [n/2].
105
+ 2 Proofs
+ 2.1 Upper bounds
109
+ For a family of graphs H and an integer n, the Cayley graph C(n, H) is the graph whose
110
+ vertices are all graphs on the n vertices [n], where two are adjacent iff their symmetric
111
+ difference is a member of H. This is clearly a Cayley graph over the elementary abelian
112
+ 2-group Z_2^N with N = \binom{n}{2}. The function DH(n) is just the independence number of this
118
+ graph, dH(n) is the so called independence ratio. Since the graph C(n, H) is vertex tran-
119
+ sitive, its independence ratio is exactly the reciprocal of its fractional chromatic number.
120
+ In order to prove an upper bound of α for its independence ratio it suffices to exhibit a
121
+ set S of vertices that contains no independent set of size larger than α|S|. This applies
122
+ also to weighted sets of vertices, but we will not use weights here.
123
+ Proof of Proposition 1.5: Let a + b denote the number of vertices of H′ where b is the
124
+ size of its independent set so that H is obtained from two copies of H′ by identifying the
125
+ vertices in this independent set. Thus the number of vertices of H is 2a + b. Consider
126
+ the following set of m = ⌊(n − b)/a⌋ copies of H′ on subsets of the vertex set [n]. All of
127
+ 3
128
+
129
+ them contain the same independent set on the vertices {n − b + 1, n − b + 2, . . . , n}, and
130
+ the additional vertices of copy number i are the vertices (i − 1)a + 1, (i − 1)a + 2, . . . , ia},
131
+ where 1 ≤ i ≤ m. Each of these copies can be viewed as a vertex of the Cayley graph
132
+ C = C(n, {H}). Since the symmetric difference of every pair of such copies forms a copy
133
+ of H, this set forms a clique of size m in C, implying that dH(n) ≤ 1/m ≤ |V(H)|/n.
135
+
136
+ The proofs of Theorem 1.2 for stars and for matchings are very similar. We describe the
137
+ proof for stars and briefly mention the modification needed for matchings. The upper
138
+ bound in Theorem 1.2 for the star K1,1 is a special case of the result above (with H′ being
139
+ a single edge). The upper bound for any prime k can be proved using the following result
140
+ of Frankl and Wilson.
141
+ Theorem 2.1 ([7]). Let p be a prime, and let a0, a1, . . . , ar be distinct residue classes
142
+ modulo p. Let F be a family of subsets of [n] and suppose that |F| ≡ a0 mod p for all
143
+ F ∈ F and that for every two distinct F1, F2 ∈ F, |F1∩F2| ≡ ai mod p for some 1 ≤ i ≤ r.
144
+ Then |F| ≤ \sum_{i=0}^{r} \binom{n}{i}.
150
+ Suppose k is a prime, n ≥ 2k and consider the family G of all stars K1,2k−1 with
151
+ center 1 and 2k − 1 leaves among the vertices {2, 3, . . . , n}. Thus |G| = \binom{n-1}{2k-1}. If two
156
+ such stars share exactly k − 1 common leaves then their symmetric difference is a copy of
157
+ K1,2k. A subset of G which is independent in the Cayley graph C(n, K1,2k) corresponds to
158
+ a collection of subsets of the set {2, 3, . . . , n}, each of size 2k − 1, where the intersection of
159
+ no two of these subsets is of cardinality k−1. Therefore, each of these sets is of cardinality
160
+ −1 modulo k and no intersection is of cardinality −1 modulo k. By the Frankl-Wilson
161
+ Theorem (Theorem 2.1) the cardinality of such a family is at most \sum_{i=0}^{k-1} \binom{n-1}{i}. Therefore,
+ for every prime k,
+ d_{K_{1,2k}}(n) ≤ \sum_{i=0}^{k-1} \binom{n-1}{i} / \binom{n-1}{2k-1} ≤ O_k(1/n^k).
179
+ In order to prove the upper bound for all k we need the following result of Frankl and
180
+ F¨uredi.
181
+ Theorem 2.2 ([6]). For every fixed positive integers ℓ > ℓ1 +ℓ2 there exist n0 = n0(ℓ) and
182
+ dℓ > 0 so that for all n > n0, if F is a family of ℓ-subsets of [n] in which the intersection
183
+ of each pair of distinct members is of cardinality either at least ℓ − ℓ1 or strictly smaller
184
+ than ℓ2, then
185
+ |F| ≤ d_ℓ · n^{max{ℓ_1, ℓ_2}}.
186
+ Proof of Theorem 1.2, upper bound: The proof for stars is essentially identical to
187
+ the one described above for prime k, using Theorem 2.2 instead of Theorem 2.1.
188
+ Let
189
+ G be the family of all stars K1,2k−1 with center 1 and 2k − 1 leaves among the vertices
190
+ 4
191
+
192
+ {2, 3, . . . , n}. Thus |G| = \binom{n-1}{2k-1}. If two such stars share exactly k − 1 common leaves
197
+ then their symmetric difference is a copy of K1,2k. Therefore, by Theorem 2.2 above with
198
+ ℓ = 2k−1, ℓ1 = ℓ2 = k−1, the maximum cardinality of a subset of G which is independent
199
+ in the Cayley graph C(n, K1,2k) is at most some ck(n − 1)k−1 for all sufficiently large n.
200
+ This supplies the required upper bound
+ c_k (n − 1)^{k−1} / |G| ≤ O_k(1/n^k),
+ for d_{K_{1,2k}}(n). The proof for matchings is similar, starting with the family of all subsets
206
+ of cardinality 2k − 1 of a fixed matching of cardinality ⌊n/2⌋. The symmetric difference
207
+ of any two matchings that share exactly k − 1 common edges is a copy of M2k. Thus the
208
+ proof can proceed exactly as in the case of stars.
209
+
210
+ 2.2 Lower bounds
212
+ In order to lower bound the independence number of a Cayley graph C = C(n, H) it
213
+ suffices to upper bound its chromatic number. One way to do so is to assign to each edge
214
+ e of the complete graph on [n] a vector v_e ∈ Z_2^r for some r, so that for every H ∈ H,
+ \sum_{e ∈ E(H)} v_e ≠ 0, where the sum is computed in Z_2^r. Given these vectors, we can assign
+ to each graph G on [n] the color \sum_{e ∈ E(G)} v_e (computed, of course, in Z_2^r). This is clearly
+ a proper coloring of C by at most 2^r colors. Note that the matrix whose columns are
+ the \binom{n}{2} vectors v_e is the analogue of the parity-check matrix of a linear error correcting
228
+ code in the traditional theory of codes, and the color defined above is the analogue of the
229
+ syndrome of a word, see, e.g., [9] for more information about these basic notions.
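+ A minimal sketch of this syndrome colouring (the vectors v_e are chosen at random
+ here purely for illustration; they are not the BCH-based vectors used in the proof
+ below):
+ from itertools import combinations
+ import random
+ n, r = 6, 4
+ edges = list(combinations(range(n), 2))
+ # assign to every edge a nonzero vector in Z_2^r, encoded as an r-bit integer
+ v = {e: random.randrange(1, 2 ** r) for e in edges}
+ def color(graph_edges):
+     # colour of a graph = sum in Z_2^r (bitwise XOR) of the vectors of its edges
+     c = 0
+     for e in graph_edges:
+         c ^= v[e]
+     return c
+ # two graphs with equal colour have a symmetric difference whose vectors sum to zero,
+ # so the colouring is proper whenever no forbidden graph H has vector sum zero
+ G1, G2 = set(edges[:5]), set(edges[3:8])
+ assert color(G1) ^ color(G2) == color(G1 ^ G2)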
230
+ Proof of Theorem 1.2, lower bound: For stars, it suffices to show that the chromatic
231
+ number of the Cayley graph C = C(n, K_{1,2k}) is at most O(n^k). Let s be the smallest
+ integer so that 2^s − 1 ≥ n. As shown by the columns of the parity check matrix of a
+ BCH-code with designed distance 2k + 1 there is a collection S of 2^s − 1 binary vectors
+ of length r = ks so that no sum of at most 2k of them (in Z_2^{ks}) is the zero vector. Fix a
236
+ proper edge coloring c of Kn by n colors. For each edge e let ve be the vector number c(e)
237
+ in S. This gives the desired lower bound for stars. For matchings we use essentially the
238
+ same construction, starting with a (non-proper) edge coloring of Kn by n colors in which
239
+ each color class forms a star.
240
+
241
+ Proof of Proposition 1.3, lower bound:
242
+ As in the previous proof, but the initial
243
+ edge-coloring now is defined by c(ij) = i for all i < j and the binary vectors selected
244
+ are taken from the columns of the parity check matrix of a code with designed distance
245
+ 2r + 2. Let U be the set of vertices of a clique of size at least 2 and at most 4r + 3. Then
246
+ U contains at least 1 and at most 2r + 1 vertices i for which there is an odd number of
247
+ 5
248
+
249
+ vertices of U with index strictly larger than i. Therefore the sum of vectors corresponding
250
+ to the edges of the clique on U is equal to a sum of at most 2r + 1 column vectors of the
251
+ parity check matrix, which is nonzero.
252
+
253
+ Proof of Proposition 1.4, lower bound: This follows from the construction in the
254
+ proof of Theorem 1.6 described in the next section.
255
+ 3 Linear graph-codes
257
+ Proof of Theorem 1.6: The theorem is equivalent to the statement that for all n ≥ 2
258
+ the minimum possible r = r(n) so that there are graphs G1, . . . , Gr on the vertex set [n]
259
+ such that every clique on a subset of cardinality at least 2 of [n] contains an odd number
260
+ of edges of at least one graph Gi, is r = [n/2]. It clearly suffices to prove the upper bound
261
+ for odd n (that imply the result for n − 1) and the lower bound for even n (implying the
262
+ result for n + 1). The upper bound is described in what follows. Let n ≥ 3 be odd. Split
263
+ the numbers [n − 1] = {1, 2, . . . , n − 1} into the (n − 1)/2 blocks Bi = {2i − 1, 2i} for
264
+ 1 ≤ i ≤ (n − 1)/2. Let Gi be the graph consisting of all edges of the n − 2i triangles with
265
+ a common base Bi on the vertices Bi ∪ {j} for 2i < j ≤ n. Our family of graphs is the
266
+ set of these (n − 1)/2 graphs Gi. Let K be an arbitrary clique on a subset A of at least
267
+ 2 vertices in [n]. If A contains a full block Bi for some i, then it contains exactly 2x + 1
268
+ edges of Gi, where x is the cardinality of the intersection of A with {2i + 1, 2i + 2, . . . , n}.
269
+ As this is odd for all x ≥ 0 we may assume that A contains no block Bi. In this case,
270
+ let j be the second largest element in A (recall that |A| ≥ 2). Clearly j ≤ n − 1, hence
271
+ it is contained in one of the blocks Bi. But in this case Gi contains exactly one edge
272
+ of the clique K, completing the proof of the upper bound. Note that it is simple to give
273
+ additional constructions with the same properties as any set of graphs that spans the same
274
+ subspace as the graphs above will do. In particular, we can replace one of the graphs Gi
275
+ by the complete graph Kn, which is the sum of all graphs Gi.
276
+ To prove the lower bound assume n is even and let G1, . . . Gn/2−1 be a family of n/2−1
277
+ graphs on [n]. We have to show that there is a clique on at least 2 vertices containing an
278
+ even number of edges of each Gi. We show that in fact there is such a clique on an even
279
+ number of vertices. To do so we apply the classical theorem of Chevalley and Warning
280
+ (cf., e.g., [2] or [12]). Recall that it asserts that any system of polynomials with n variables
281
+ over a finite field in which the number of variables exceeds the sum of the degrees, which
282
+ admits a solution, must admit another one (in fact, the number of solutions is divisible by
283
+ the characteristics). Associate each vertex i with a variable xi over Z2 and consider the
284
+ following homogeneous system of polynomial equations over Z2. For each graph Gs in our
285
+ 6
286
+
287
+ family, \sum_{ij ∈ E(G_s)} x_i x_j = 0. In addition, add the linear equation \sum_{i=1}^{n} x_i = 0.
293
+ The sum of the degrees of the polynomials here is 2(n/2 − 1) + 1 = n − 1, which
294
+ is smaller than the number of variables.
295
+ Since the system is homogeneous it admits
296
+ the trivial solution xi = 0 for all i. Any other solution (which exists by the Chevalley
297
+ Warning Theorem) gives a clique on the set of vertices {i : xi = 1} which is nonempty, of
298
+ even cardinality, and contains an even number of edges (possibly zero) of each Gi. This
299
+ establishes the lower bound and completes the proof of Theorem 1.6.
300
+
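+ The construction above is easy to check mechanically for small odd n: build the graphs
+ Gi and verify that every clique on at least two vertices contains an odd number of edges
+ of some Gi. An illustrative brute-force sketch (vertices are 1, . . . , n as in the proof):
+ from itertools import combinations
+ n = 7                                            # any small odd n
+ def G(i):
+     # G_i: all edges of the triangles with common base B_i = {2i-1, 2i}
+     edges = {(2 * i - 1, 2 * i)}
+     for j in range(2 * i + 1, n + 1):
+         edges |= {(2 * i - 1, j), (2 * i, j)}
+     return edges
+ graphs = [G(i) for i in range(1, (n - 1) // 2 + 1)]
+ for size in range(2, n + 1):
+     for A in combinations(range(1, n + 1), size):
+         clique = set(combinations(A, 2))
+         assert any(len(clique & g) % 2 == 1 for g in graphs)
+ print("every clique meets some G_i in an odd number of edges")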
301
+ 4 Concluding remarks and open problems
303
+ • Question 1.1, which is equivalent to the problem of deciding whether or not for any
304
+ fixed nonempty graph H with an even number of edges dH(n) tends to 0 as n tends
305
+ to infinity, remains wide open.
306
+ An interesting special case is whether or not dK4(n) = o(1). It is also interesting
307
+ to decide whether or not d_{K_4}(n) ≥ 1/n^{o(1)}. It is not difficult to show that the latter
+ would follow from the existence (if true) of an edge coloring of K_n by n^{o(1)} colors
311
+ with no copy of K4 in which every color appears an even number of times. This may
312
+ be related to the construction in [10], see also [4].
313
+ • Gowers conjectured in [8] that any family of a constant fraction of all graphs on [n],
314
+ where n is sufficiently large, contains two graphs G1, G2 such that G2 is a subgraph
315
+ of G1 and the symmetric difference of the two graphs (that is, the set of all edges of
316
+ G1 that are not in G2) forms a clique. This is clearly stronger than the conjecture
317
+ that dK(n) tends to 0 as n tends to infinity, which is also open. As explained in
318
+ [8] the question of Gowers can be viewed as the first unknown case of a polynomial
319
+ version of the density Hales-Jewett Theorem.
320
+ • As mentioned in the remark following the statement of Proposition 1.5, it is not
321
+ difficult to show that for every graph H with an even number of edges the maximum
+ possible cardinality of a linear family of graphs on [n] in which no symmetric differ-
+ ence is a copy of H, is o(2^{\binom{n}{2}}). As the proof applies Ramsey's Theorem, it provides
325
+ very weak bounds. It will be interesting to establish tighter bounds for the linear
326
+ case. Theorem 1.6 provides an example of a tight result of this form.
327
+ • The problem considered above can be extended to hypergraphs. More generally, it
328
+ can be extended to other versions of problems about binary codes, where the coordi-
329
+ nates of each codeword are indexed by the elements of some combinatorial structure,
330
+ 7
331
+
332
+ and the forbidden symmetric differences correspond to a prescribed family of sub-
333
+ structures. Here is an example of a problem of this type. What is the maximum
334
+ possible cardinality of a collection of binary vectors whose coordinates are indexed
335
+ by the elements of the ordered set [n], where no symmetric difference of two dis-
336
+ tinct members of the collection forms an interval of length which is a cube of an
337
+ integer? The corresponding Cayley graph here has 2^n vertices, and it is triangle-free
+ by Fermat's last Theorem for cubes. Its independence number, which is the answer
+ to the question above, is o(2^n). Indeed, this follows from the Furstenberg-S´ark¨ozy
340
+ Theorem and its extensions [11], by considering the maximum possible cardinality
341
+ of an independent set in the induced subgraph on the set of all vertices that are
342
+ characteristic vectors of an interval [i] = {1, . . . , i} for 0 ≤ i ≤ n.
343
+ References
344
+ [1] N. Alon, A. Gujgiczer, J. K¨orner, A. Milojevi´c and G. Simonyi, Structured codes of
345
+ graphs, SIAM J. Discrete Math., to appear.
346
+ [2] Z. I. Borevich and I. R. Shafarevich, Number Theory, Academic Press, New York,
347
+ 1966.
348
+ [3] A. Berger and Y. Zhao, K4-intersecting families of graphs, arXiv:2103.12671, 2021.
349
+ [4] D. Conlon, J. Fox, C. Lee and B. Sudakov, The Erd˝os-Gy´arf´as problem on generalized
350
+ Ramsey numbers, Proc. London Math. Soc. 110 (2015), 1–18.
351
+ [5] D. Ellis, Y. Filmus and E. Friedgut, Triangle-intersecting families of graphs, Journal
352
+ of the European Mathematical Society 14 (2012), No. 3, 841–885.
353
+ [6] P. Frankl and Z. F¨uredi, Forbidding just one intersection, J. Combin. Theory Ser. A
354
+ 39 (1985), no. 2, 160–176.
355
+ [7] P. Frankl and R. M. Wilson, Intersection theorems with geometric consequences,
356
+ Combinatorica 1 (1981), 357–368.
357
+ [8] W. T. Gowers, https://gowers.wordpress.com/2009/11/14/the-first-unknown-case-of-
358
+ polynomial-dhj/
359
+ [9] F. MacWilliams and N. Sloane, The Theory of Error-Correcting Codes, I. North-
360
+ Holland Mathematical Library, Vol. 16. North-Holland Publishing Co., Amsterdam-
361
+ New York-Oxford (1977).
362
+ [10] D. Mubayi, Edge-coloring cliques with three colors on all 4-cliques, Combinatorica 18
363
+ (1998), no. 2, 293–296.
364
+ 8
365
+
366
+ [11] A. S´ark¨ozy, On difference sets of sequences of integers. III. Acta Math. Acad. Sci.
367
+ Hungar. 31 (1978), no. 3-4, 355–386.
368
+ [12] W. M. Schmidt, Equations over Finite Fields, an Elementary Approach, Springer
369
+ Verlag Lecture Notes in Math., 1976.
370
+ 9
371
+
NtFQT4oBgHgl3EQfWjbh/content/tmp_files/load_file.txt ADDED
@@ -0,0 +1,301 @@
1
+ filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf,len=300
2
+ page_content='arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
3
+ page_content='13305v1 [math.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
4
+ page_content='CO] 30 Jan 2023 Graph-Codes Noga Alon ∗ Abstract The symmetric difference of two graphs G1, G2 on the same set of vertices [n] = {1, 2, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
5
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
6
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
7
+ page_content=' , n} is the graph on [n] whose set of edges are all edges that belong to exactly one of the two graphs G1, G2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
8
+ page_content=' Let H be a fixed graph with an even (positive) number of edges, and let DH(n) denote the maximum possible cardinality of a family of graphs on [n] containing no two members whose symmetric difference is a copy of H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
9
+ page_content=' Is it true that DH(n) = o(2(n 2)) for any such H?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
10
+ page_content=' We discuss this problem, compute the value of DH(n) up to a constant factor for stars and matchings, and discuss several variants of the problem including ones that have been considered in earlier work.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
11
+ page_content=' 1 Introduction 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
12
+ page_content='1 The problem The symmetric difference of two graph G1 = (V, E1) and G2 = (V, E2) on the same set of vertices V is the graph (V, E1 ⊕ E2) where E1 ⊕ E2 is the symmetric difference between E1 and E2, that is, the set of all edges that belong to exactly one of the two graphs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
13
+ page_content=' Put V = [n] = {1, 2, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
14
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
15
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
16
+ page_content=' , n} and let H be a family of graphs on the set of vertices [n] which is closed under isomorphism.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
17
+ page_content=' A collection of graphs F on [n] is called an H-(graph)-code if it contains no two members whose symmetric difference is a graph in H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
18
+ page_content=' For the special case that H contains all copies of a single graph H on [n] this is called an H-code.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
19
+ page_content=' Here we are interested in the maximum possible cardinality of such codes for various families H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
20
+ page_content=' Let DH(n) denote this maximum, and let dH(n) = DH(n) 2(n 2) denote the maximum possible fraction of the total number of graphs on [n] in an H-code.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
21
+ page_content=' If H consists of all graphs isomorphic to one graph H, we denote dH(n) by dH(n).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
22
+ page_content=' Note that if H consists of all graphs with less than d edges, then DH(n) is simply the maximum ∗Princeton University, Princeton, NJ, USA and Tel Aviv University, Tel Aviv, Israel.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
23
+ page_content=' Email: nalon@math.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
24
+ page_content='princeton.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
25
+ page_content='edu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
26
+ page_content=' Research supported in part by NSF grant DMS-2154082 and by USA-Israel BSF grant 2018267.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
27
+ page_content=' 1 possible cardinality of a binary code of length �n 2 � and minimum distance at least d.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
28
+ page_content=' This motivates the terminology “graph-codes” used here.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
29
+ page_content=' The case H = K where K is the family of all cliques is of particular interest.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
30
+ page_content=' This case is motivated by a conjecture of Gowers raised in his blog post [8] in 2009 and is discussed briefly in the comments of that blog.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
31
+ page_content=' If H consists of all graphs with independence number at most 2, then dH(n) ≥ 1/8 for all n ≥ 3, as shown by the family of all graphs on [n] containing a triangle on the set of vertices {1, 2, 3}.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
32
+ page_content=' An interesting result of Ellis, Filmus and Friedgut [5], settling a conjecture of Simonovits and S´os, asserts that this is tight for all n ≥ 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
33
+ page_content=' The corresponding result, that dH′(n) = 1/26 for all n ≥ 4, where H′ is the family of all graphs with independence number at most 3, is proved in [3].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
34
+ page_content=' A more systematic study of the parameters DH(n) and dH(n) for various families of graphs H appears in the recent paper [1].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
35
+ page_content=' The families H considered in this work include the family of all disconnected graphs, the family of all graphs that are not 2-connected, the family of all non-Hamiltonian graphs and the family of all graphs that contain or do not contain a spanning star.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
36
+ page_content=' Additional families studied are all graphs that contain an induced or non-induced copy of a fixed graph T, or all graphs that do not contain such a subgraph.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
37
+ page_content=' In this note we focus on the case that H consists of a single graph H and the case that H is the family of all cliques, or all cliques up to a prescribed size.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
38
+ page_content=' Note that trivially, if every member of H has an odd number of edges then dH(n) ≥ 1 2 as the family of all graphs on [n] with an even number of edges forms an H-code.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
39
+ page_content=' This suggests the following intriguing question.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
40
+ page_content=' Question 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
41
+ page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
42
+ page_content=' Let H be a family of graphs closed under isomorphism.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
43
+ page_content=' Is it true that dH(n) tends to 0 as n tends to infinity if and only if H contains a graph with an even number of edges ?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
44
+ page_content=' Equivalently: is it true that for any fixed graph H with an even number of edges, dH(n) tends to 0 as n tends to infinity ?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
45
+ page_content=' We also study the linear variant of these problems, where the H-codes considered are restricted to linear subspaces, that is, to families of graphs on [n] closed under symmetric difference.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
46
+ page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
47
+ page_content='2 Results Recall that K is the family of all cliques.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
48
+ page_content=' Let K(r) denote the set of all cliques on at most r vertices.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
49
+ page_content=' Let K1,t denote the star with t edges and let Mt denote the matching of t edges.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
50
+ page_content=' Theorem 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
51
+ page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
52
+ page_content=' For every positive integer k, dK1,2k(n) = Θk(1/nk) and dM1,2k(n) = Θk(1/nk).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
53
+ page_content=' Proposition 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
54
+ page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
55
+ page_content=' For every integer r ≥ 1, dK(4r+3)(n) ≥ Ω( 1 nr ).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
56
+ page_content=' 2 Proposition 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
57
+ page_content='4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
58
+ page_content=' For the family K of all cliques, dK(n) ≥ 1 2[n/2] .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
59
+ page_content=' Proposition 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
60
+ page_content='5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
61
+ page_content=' Let H be a fixed graph obtained from two copies of a graph H′ by identifying the vertices of an independent set of H′.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
62
+ page_content=' Then dH(n) ≤ |V (H)| n for all n ≥ |V (H)|.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
63
+ page_content=' In particular, dH(n) tends to 0 as n tends to infinity.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
64
+ page_content=' Remark: all lower bounds are proved by exhibiting proper colorings of the relevant Cayley graphs, and in all cases the constructed family is an affine space over Z2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
65
+ page_content=' Using a simple Ramsey-theoretic argument it is not difficult to show that for an affine space the maximum possible cardinality obtained is at most a fraction O(log log n/ log n) of all graphs on n vertices whenever the defining family contains a fixed graph with an even number of edges.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
66
+ page_content=' Since all lower bounds are obtained by what may be called linear graph-codes one can study this separately, as done for standard error correcting codes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
67
+ page_content=' For the family of all cliques K we get here an exact result (strengthening the assertion of Proposition 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
68
+ page_content='4).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
69
+ page_content=' Theorem 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
70
+ page_content='6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
71
+ page_content=' For any n ≥ 2, the minimum possible co-dimension of a linear space of graphs on n vertices that contains no member of K is exactly [n/2].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
72
+ page_content=' 2 Proofs 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
73
+ page_content='1 Upper bounds For a family of graphs H and an integer n, the Cayley graph C(n, H) is the graph whose vertices are all graphs on the n vertices [n], where two are adjacent iff their symmetric difference is a member of H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
74
+ page_content=' This is clearly a Cayley graph over the elementary abelian 2-group ZN 2 with N = �n 2 � .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
75
+ page_content=' The function DH(n) is just the independence number of this graph, dH(n) is the so called independence ratio.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
76
+ page_content=' Since the graph C(n, H) is vertex tran- sitive, its independence ratio is exactly the reciprocal of its fractional chromatic number.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
77
+ page_content=' In order to prove an upper bound of α for its independence ratio it suffices to exhibit a set S of vertices that contains no independent set of size larger than α|S|.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
78
+ page_content=' This applies also to weighted sets of vertices, but we will not use weights here.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
79
+ page_content=' Proof of Proposition 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
80
+ page_content='5: Let a + b denote the number of vertices of H′ where b is the size of its independent set so that H is obtained from two copies of H′ by identifying the vertices in this independent set.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
81
+ page_content=' Thus the number of vertices of H is 2a + b.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
82
+ page_content=' Consider the following set of m = ⌊(n − b)/a⌋ copies of H′ on subsets of the vertex set [n].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
83
+ page_content=' All of 3 them contain the same independent set on the vertices {n − b + 1, n − b + 2, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
84
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
85
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
86
+ page_content=' , n}, and the additional vertices of copy number i are the vertices (i − 1)a + 1, (i − 1)a + 2, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
87
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
88
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
89
+ page_content=' , ia}, where 1 ≤ i ≤ m.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
90
+ page_content=' Each of these copies can be viewed as a vertex of the Cayley graph C = C(n, {H}).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/NtFQT4oBgHgl3EQfWjbh/content/2301.13305v1.pdf'}
91
Since the symmetric difference of every pair of such copies forms a copy of H, this set forms a clique of size m in C, implying that dH(n) ≤ 1/m ≤ |V(H)|/n. □

The proofs of Theorem 1.2 for stars and for matchings are very similar. We describe the proof for stars and briefly mention the modification needed for matchings. The upper bound in Theorem 1.2 for the star K1,1 is a special case of the result above (with H′ being a single edge). The upper bound for any prime k can be proved using the following result of Frankl and Wilson.

Theorem 2.1 ([7]). Let p be a prime, and let a0, a1, ..., ar be distinct residue classes modulo p. Let F be a family of subsets of [n] and suppose that |F| ≡ a0 mod p for all F ∈ F and that for every two distinct F1, F2 ∈ F, |F1 ∩ F2| ≡ ai mod p for some 1 ≤ i ≤ r. Then |F| ≤ \sum_{i=0}^{r} \binom{n}{i}.

Suppose k is a prime, n ≥ 2k, and consider the family G of all stars K1,2k−1 with center 1 and 2k − 1 leaves among the vertices {2, 3, ..., n}. Thus |G| = \binom{n−1}{2k−1}. If two such stars share exactly k − 1 common leaves then their symmetric difference is a copy of K1,2k. A subset of G which is independent in the Cayley graph C(n, K1,2k) corresponds to a collection of subsets of the set {2, 3, ..., n}, each of size 2k − 1, where the intersection of no two of these subsets is of cardinality k − 1. Therefore, each of these sets is of cardinality −1 modulo k and no intersection is of cardinality −1 modulo k. By the Frankl-Wilson Theorem (Theorem 2.1) the cardinality of such a family is at most \sum_{i=0}^{k−1} \binom{n−1}{i}. Therefore, for every prime k,

dK1,2k(n) ≤ \sum_{i=0}^{k−1} \binom{n−1}{i} / \binom{n−1}{2k−1} ≤ Ok(1/n^k).

In order to prove the upper bound for all k we need the following result of Frankl and Füredi.

Theorem 2.2 ([6]). For all fixed positive integers ℓ > ℓ1 + ℓ2 there exist n0 = n0(ℓ) and dℓ > 0 so that for all n > n0, if F is a family of ℓ-subsets of [n] in which the intersection of each pair of distinct members is of cardinality either at least ℓ − ℓ1 or strictly smaller than ℓ2, then |F| ≤ dℓ · n^{max{ℓ1, ℓ2}}.

Proof of Theorem 1.2, upper bound: The proof for stars is essentially identical to the one described above for prime k, using Theorem 2.2 instead of Theorem 2.1. Let G be the family of all stars K1,2k−1 with center 1 and 2k − 1 leaves among the vertices {2, 3, ..., n}. Thus |G| = \binom{n−1}{2k−1}. If two such stars share exactly k − 1 common leaves then their symmetric difference is a copy of K1,2k. Therefore, by Theorem 2.2 above with ℓ = 2k − 1 and ℓ1 = ℓ2 = k − 1, the maximum cardinality of a subset of G which is independent in the Cayley graph C(n, K1,2k) is at most some ck(n − 1)^{k−1} for all sufficiently large n. This supplies the required upper bound ck(n − 1)^{k−1} / |G| ≤ Ok(1/n^k) for dK1,2k(n). The proof for matchings is similar, starting with the family of all subsets of cardinality 2k − 1 of a fixed matching of cardinality ⌊n/2⌋. The symmetric difference of any two matchings that share exactly k − 1 common edges is a copy of M2k. Thus the proof can proceed exactly as in the case of stars. □
2.2 Lower bounds

In order to lower bound the independence number of a Cayley graph C = C(n, H) it suffices to upper bound its chromatic number. One way to do so is to assign to each edge e of the complete graph on [n] a vector ve ∈ Z_2^r for some r, so that for every H ∈ H, \sum_{e ∈ E(H)} ve ≠ 0, where the sum is computed in Z_2^r. Given these vectors, we can assign to each graph G on [n] the color \sum_{e ∈ E(G)} ve (computed, of course, in Z_2^r). This is clearly a proper coloring of C by at most 2^r colors. Note that the matrix whose columns are the \binom{n}{2} vectors ve is the analogue of the parity-check matrix of a linear error-correcting code in the traditional theory of codes, and the color defined above is the analogue of the syndrome of a word; see, e.g., [9] for more information about these basic notions.
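As an illustration of this coloring argument, the following short Python sketch (our own illustration, not part of the paper) computes the "syndrome" color of a graph from an arbitrary assignment of r-bit vectors to edges; the vectors are encoded as integers and the sum in Z_2^r is a bitwise XOR.

# Illustrative sketch: the syndrome coloring described above.
# Each edge {u, v} of the complete graph on [n] is assigned an r-bit vector,
# encoded as a Python int; the color of a graph G is the XOR of its edge vectors.

def graph_color(edges, edge_vector):
    """Return the syndrome color of a graph given as an iterable of edges (u, v)."""
    color = 0
    for u, v in edges:
        color ^= edge_vector[frozenset((u, v))]
    return color

# Two graphs receive the same color exactly when the vectors on their symmetric
# difference XOR to zero. Hence, if every forbidden subgraph H has a nonzero
# vector sum, each color class is an independent set in the Cayley graph C(n, H),
# and the chromatic number is at most 2**r.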
Proof of Theorem 1.2, lower bound: For stars, it suffices to show that the chromatic number of the Cayley graph C = C(n, K1,2k) is at most O(n^k). Let s be the smallest integer so that 2^s − 1 ≥ n. As shown by the columns of the parity check matrix of a BCH-code with designed distance 2k + 1, there is a collection S of 2^s − 1 binary vectors of length r = ks so that no sum of at most 2k of them (in Z_2^{ks}) is the zero vector. Fix a proper edge coloring c of Kn by n colors. For each edge e let ve be the vector number c(e) in S. This gives the desired lower bound for stars. For matchings we use essentially the same construction, starting with a (non-proper) edge coloring of Kn by n colors in which each color class forms a star. □

Proof of Proposition 1.3, lower bound: As in the previous proof, but the initial edge-coloring now is defined by c(ij) = i for all i < j, and the binary vectors selected are taken from the columns of the parity check matrix of a code with designed distance 2r + 2. Let U be the set of vertices of a clique of size at least 2 and at most 4r + 3. Then U contains at least 1 and at most 2r + 1 vertices i for which there is an odd number of vertices of U with index strictly larger than i. Therefore the sum of vectors corresponding to the edges of the clique on U is equal to a sum of at most 2r + 1 column vectors of the parity check matrix, which is nonzero. □

Proof of Proposition 1.4, lower bound: This follows from the construction in the proof of Theorem 1.6 described in the next section.
3 Linear graph-codes

Proof of Theorem 1.6: The theorem is equivalent to the statement that for all n ≥ 2 the minimum possible r = r(n), so that there are graphs G1, ..., Gr on the vertex set [n] such that every clique on a subset of cardinality at least 2 of [n] contains an odd number of edges of at least one graph Gi, is r = ⌊n/2⌋. It clearly suffices to prove the upper bound for odd n (which implies the result for n − 1) and the lower bound for even n (implying the result for n + 1). The upper bound is described in what follows.

Let n ≥ 3 be odd. Split the numbers [n − 1] = {1, 2, ..., n − 1} into the (n − 1)/2 blocks Bi = {2i − 1, 2i} for 1 ≤ i ≤ (n − 1)/2. Let Gi be the graph consisting of all edges of the n − 2i triangles with a common base Bi on the vertices Bi ∪ {j} for 2i < j ≤ n. Our family of graphs is the set of these (n − 1)/2 graphs Gi. Let K be an arbitrary clique on a subset A of at least 2 vertices in [n]. If A contains a full block Bi for some i, then it contains exactly 2x + 1 edges of Gi, where x is the cardinality of the intersection of A with {2i + 1, 2i + 2, ..., n}. As this is odd for all x ≥ 0, we may assume that A contains no block Bi. In this case, let j be the second largest element in A (recall that |A| ≥ 2). Clearly j ≤ n − 1, hence it is contained in one of the blocks Bi. But in this case Gi contains exactly one edge of the clique K, completing the proof of the upper bound. Note that it is simple to give additional constructions with the same properties, as any set of graphs that spans the same subspace as the graphs above will do. In particular, we can replace one of the graphs Gi by the complete graph Kn, which is the sum of all graphs Gi.
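To make the construction concrete, here is a small illustrative Python sketch (our own, not part of the paper) that builds the graphs Gi for a given odd n and verifies by brute force, for small n, that every clique on at least two vertices meets some Gi in an odd number of edges.

# Illustrative brute-force check of the upper-bound construction (small odd n only).
from itertools import combinations

def build_family(n):
    """Return the graphs G_1, ..., G_{(n-1)/2} as sets of edges (frozensets)."""
    family = []
    for i in range(1, (n - 1) // 2 + 1):
        a, b = 2 * i - 1, 2 * i              # the block B_i = {2i-1, 2i}
        edges = {frozenset((a, b))}
        for j in range(2 * i + 1, n + 1):    # triangles on B_i ∪ {j}
            edges |= {frozenset((a, j)), frozenset((b, j))}
        family.append(edges)
    return family

def every_clique_hit(n):
    """Check that every clique on >= 2 vertices has odd intersection with some G_i."""
    family = build_family(n)
    for size in range(2, n + 1):
        for A in combinations(range(1, n + 1), size):
            clique = {frozenset(e) for e in combinations(A, 2)}
            if not any(len(clique & G) % 2 == 1 for G in family):
                return False
    return True

if __name__ == "__main__":
    print(every_clique_hit(7))   # expected: True for small odd n such as 5, 7, 9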
To prove the lower bound assume n is even and let G1, ..., G_{n/2−1} be a family of n/2 − 1 graphs on [n]. We have to show that there is a clique on at least 2 vertices containing an even number of edges of each Gi. We show that in fact there is such a clique on an even number of vertices. To do so we apply the classical theorem of Chevalley and Warning (cf., e.g., [2] or [12]). Recall that it asserts that any system of polynomials with n variables over a finite field in which the number of variables exceeds the sum of the degrees, which admits a solution, must admit another one (in fact, the number of solutions is divisible by the characteristic). Associate each vertex i with a variable xi over Z2 and consider the following homogeneous system of polynomial equations over Z2. For each graph Gs in our family, \sum_{ij ∈ E(Gs)} xi xj = 0. In addition, add the linear equation \sum_{i=1}^{n} xi = 0.
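For readability, the full system used in this argument can be written out as the following display (a restatement of the equations just described, not an addition to the argument):

% The homogeneous system over Z_2 used in the lower-bound argument:
\[
\begin{aligned}
&\sum_{ij \in E(G_s)} x_i x_j = 0 \qquad (1 \le s \le n/2 - 1),\\
&\sum_{i=1}^{n} x_i = 0 .
\end{aligned}
\]
% Total degree: 2(n/2 - 1) + 1 = n - 1 < n, so the Chevalley-Warning theorem applies.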
The sum of the degrees of the polynomials here is 2(n/2 − 1) + 1 = n − 1, which is smaller than the number of variables. Since the system is homogeneous it admits the trivial solution xi = 0 for all i. Any other solution (which exists by the Chevalley-Warning Theorem) gives a clique on the set of vertices {i : xi = 1} which is nonempty, of even cardinality, and contains an even number of edges (possibly zero) of each Gi. This establishes the lower bound and completes the proof of Theorem 1.6. □

4 Concluding remarks and open problems

Question 1.1, which is equivalent to the problem of deciding whether or not for any fixed nonempty graph H with an even number of edges dH(n) tends to 0 as n tends to infinity, remains wide open. An interesting special case is whether or not dK4(n) = o(1). It is also interesting to decide whether or not dK4(n) ≥ 1/n^{o(1)}. It is not difficult to show that the latter would follow from the existence (if true) of an edge coloring of Kn by n^{o(1)} colors with no copy of K4 in which every color appears an even number of times. This may be related to the construction in [10]; see also [4].

Gowers conjectured in [8] that any family of a constant fraction of all graphs on [n], where n is sufficiently large, contains two graphs G1, G2 such that G2 is a subgraph of G1 and the symmetric difference of the two graphs (that is, the set of all edges of G1 that are not in G2) forms a clique. This is clearly stronger than the conjecture that dK(n) tends to 0 as n tends to infinity, which is also open. As explained in [8] the question of Gowers can be viewed as the first unknown case of a polynomial version of the density Hales-Jewett Theorem.

As mentioned in the remark following the statement of Proposition 1.5, it is not difficult to show that for every graph H with an even number of edges the maximum possible cardinality of a linear family of graphs on [n] in which no symmetric difference is a copy of H is o(2^{\binom{n}{2}}). As the proof applies Ramsey's Theorem, it provides very weak bounds. It will be interesting to establish tighter bounds for the linear case. Theorem 1.6 provides an example of a tight result of this form.

The problem considered above can be extended to hypergraphs. More generally, it can be extended to other versions of problems about binary codes, where the coordinates of each codeword are indexed by the elements of some combinatorial structure, and the forbidden symmetric differences correspond to a prescribed family of substructures. Here is an example of a problem of this type. What is the maximum possible cardinality of a collection of binary vectors whose coordinates are indexed by the elements of the ordered set [n], where no symmetric difference of two distinct members of the collection forms an interval of length which is a cube of an integer? The corresponding Cayley graph here has 2^n vertices, and it is triangle-free by Fermat's last Theorem for cubes. Its independence number, which is the answer to the question above, is o(2^n). Indeed, this follows from the Furstenberg-Sárközy Theorem and its extensions [11], by considering the maximum possible cardinality of an independent set in the induced subgraph on the set of all vertices that are characteristic vectors of an interval [i] = {1, ..., i} for 0 ≤ i ≤ n.
References

[1] N. Alon, A. Gujgiczer, J. Körner, A. Milojević and G. Simonyi, Structured codes of graphs, SIAM J. Discrete Math., to appear.
[2] Z. I. Borevich and I. R. Shafarevich, Number Theory, Academic Press, New York, 1966.
[3] A. Berger and Y. Zhao, K4-intersecting families of graphs, arXiv:2103.12671, 2021.
[4] D. Conlon, J. Fox, C. Lee and B. Sudakov, The Erdős-Gyárfás problem on generalized Ramsey numbers, Proc. London Math. Soc. 110 (2015), 1–18.
[5] D. Ellis, Y. Filmus and E. Friedgut, Triangle-intersecting families of graphs, Journal of the European Mathematical Society 14 (2012), no. 3, 841–885.
[6] P. Frankl and Z. Füredi, Forbidding just one intersection, J. Combin. Theory Ser. A 39 (1985), no. 2, 160–176.
[7] P. Frankl and R. M. Wilson, Intersection theorems with geometric consequences, Combinatorica 1 (1981), 357–368.
[8] W. T. Gowers, https://gowers.wordpress.com/2009/11/14/the-first-unknown-case-of-polynomial-dhj/
[9] F. MacWilliams and N. Sloane, The Theory of Error-Correcting Codes, I. North-Holland Mathematical Library, Vol. 16, North-Holland Publishing Co., Amsterdam-New York-Oxford (1977).
[10] D. Mubayi, Edge-coloring cliques with three colors on all 4-cliques, Combinatorica 18 (1998), no. 2, 293–296.
[11] A. Sárközy, On difference sets of sequences of integers. III. Acta Math. Acad. Sci. Hungar. 31 (1978), no. 3-4, 355–386.
[12] W. M. Schmidt, Equations over Finite Fields, an Elementary Approach, Springer Verlag Lecture Notes in Math., 1976.
OdAyT4oBgHgl3EQf7PpY/content/tmp_files/2301.00835v1.pdf.txt ADDED
@@ -0,0 +1,3589 @@
Timed Model-Based Mutation Operators for Simulink Models

arXiv:2301.00835v1 [cs.SE] 2 Jan 2023

Jian Chen* 1, Manar H. Alalfi 2, Thomas R. Dean 3

1 Department of Electrical and Computer Engineering, Queen's University, Kingston, ON, Canada. E-mail: jian.chen@queensu.ca
2 Department of Computer Science, Ryerson University, Toronto, ON, Canada. E-mail: manar.alalfi@cs.ryerson.ca
3 Department of Electrical and Computer Engineering, Queen's University, Kingston, ON, Canada. E-mail: tom.dean@queensu.ca

Abstract

Model-based mutation analysis is a recent research area, and real-time system testing can benefit from using model mutants. Model-based mutation testing (MBMT) is a particular branch of model-based testing. It generates faulty versions of a model using mutation operators to evaluate and improve test cases. Mutation testing is an effective way to ensure software correctness and has been applied to various application areas. Simulink is a vital modeling language for real-time systems. This paper introduces Simulink model mutation analysis to improve Model-in-the-loop (MIL) testing. We propose a set of Simulink mutation operators based on AUTOSAR, which reflects the temporal correctness when a Simulink model is mapped to Operating System tasks. We implement a mutation framework that generates mutants for implicit clock Simulink models. Finally, we demonstrate how this framework generates mutants to reveal task interference issues in the simulation. Our work integrates the Simulink model with the timed systems to better support mutation testing automation.

Keywords: Mutation Testing, Model-Based Testing, Model-Based Mutation Testing, Mutation Operator, Simulink, Real-Time System, Scheduling, AUTOSAR
1. Introduction

Today, cars come equipped with advanced technologies that did not exist before, such as Automatic Emergency Braking (AEB), Adaptive Cruise Control (ACC), Lane Departure Warning/Lane Keeping, and autonomous driving. All of these features rely on software to realize sophisticated control algorithms. Generally, such software is developed within the timed system context, in which system correctness relies not only on the correctness of the implemented functions but also on the system meeting its time constraints. Many factors can contribute to the execution time of a system running on a target platform. Issues such as task interference may cause delays during task execution. Software quality plays a crucial role in such safety-critical applications.

Model-Based Testing (MBT) is a promising technique for the automated testing of timed systems. A model represents the behavior of software, and the model is usually abstracted from real-time specifications. However, some modeling environments support this feature in Hardware-in-the-loop (HIL) simulation testing instead of the MIL. For example, Matlab/Simulink (ML/SL) simulations assume block behaviors are completed in nearly zero execution time, while real execution requires a finite execution time, which may cause a failure. ML/SL models are based on the Synchronous Reactive (SR) model 23 that may assume the task execution times are zero. Errors in the model may not be apparent without an explicit real-time execution in the MIL phase. Usually, a Simulink model can be well simulated in the MIL, but it may have errors in the real-time context.

Hence, MBT needs an extension to accommodate the real-time context, which includes modeling the system through a timed formalism and checking that the implementation conforms to its specification. Traditionally, this is done via conformance checks 35. Recently, several tools have been proposed to simulate the real-time execution effects for ML/SL models in MIL, such as TrueTime 21, TRES 12, Timing-aware blocks 27, and SimSched 9. SimSched uses a model transformation to integrate scheduling into the model to validate the real-time context during simulation. To evaluate SimSched, we turn to mutation testing, using mutation analysis to assist the evaluation of the SimSched tool.

In this paper, we propose a set of mutation operators with a timed task model, which is based on the AUTomotive Open System ARchitecture (AUTOSAR), that reflects the temporal correctness when a Simulink model is mapped to Real-Time Operating System (RTOS) tasks in a real-time context.

This paper is organized as follows: Section 2 introduces background information. Section 3 presents the set of proposed timed mutation operators for Simulink models. Section 4 explains the usage of the timed mutation operators. Section 5 presents validation experiments and results. Section 6 summarizes related studies in MBT. Finally, Section 7 presents the conclusions of our work and outlines future work.
2. Background

This section gives an overview of the background information needed to explain our work. We begin with a basic introduction to mutation testing, Simulink, and AUTOSAR; then, we present our timed task model.

2.1. Mutation testing

Mutation testing was introduced in the 1970s 17,13,22 and proved to be an effective way to reveal software faults 32. It is a fault-based software testing technique, which has been extensively studied and used for decades. It contributes a range of methods, tools, and reliable results for software testing. Mutation testing is designed to find valid test cases and discover real errors in the program.

Model-Based Mutation Testing (MBMT) takes advantage of both model-based testing and mutation testing and has been widely applied to multiple types of models such as feature models 20, statechart-based models 41,1, timed automata 3,2, and Simulink 8,26,38. However, in real-time system development, both logical and temporal correctness is crucial to correct system functionality. The temporal correctness depends on timing assumptions for each task. Timed Automata (TA) 4 is a common formalism to model and verify real-time systems to see whether designs meet temporal requirements. Aichernig et al. 3 propose an MBMT technique for timed automata that applies to input/output timed automata (TAIO) models. Nilsson et al. 28 add an extension to the TA formalism with a task model, and their mutation operators focus on timeliness. Simulink* is widely used for model-driven development of software within the automotive sector. Most of the mutation operators proposed for Simulink models are from a property point of view, either run-time or design-time, such as signal modification, arithmetic alternation, or block change 18,38,36,43. Some of the proposed mutation testing approaches are targeted at test case generation for Simulink models 8,19. However, there is no mutation operator with an explicit clock model for Simulink.

* https://www.mathworks.com/products/simulink.html
2.2. Simulink

Simulink is one of the most popular modeling languages for modeling dynamical systems, and MATLAB provides a graphical programming environment to perform system simulations. Simulink models are graphical blocks and lines, and they are connected by signals between input and output ports. The Simulink simulation engine determines the execution order of blocks based on the data dependencies among the blocks before a simulation execution. Simulink defines two types of blocks, direct feedthrough and non-direct feedthrough, to assure the correct data dependencies in the simulation. Simulink uses the following two basic rules 25 to determine the sorted execution order: a block must be executed before any of the blocks whose direct-feedthrough ports it drives; blocks without direct feedthrough inputs can execute in arbitrary order as long as they precede any block whose direct-feedthrough inputs they drive. All blocks are scheduled in sorted order and executed in sequential execution order. The Simulink engine maintains a virtual clock to execute each ordered block at each virtual time.
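As a rough illustration of these ordering rules (our own sketch, not part of the paper or of the Simulink API), a block ordering consistent with the rules can be computed as a topological sort of the direct-feedthrough dependency graph:

# Illustrative sketch: sort blocks so that every block runs before the blocks
# whose direct-feedthrough inputs it drives (assumes the dependency graph is acyclic).
from collections import defaultdict, deque

def sorted_order(blocks, feedthrough_edges):
    """feedthrough_edges: (driver, driven) pairs where 'driven' reads the signal
    through a direct-feedthrough port and therefore must run after 'driver'."""
    indegree = {b: 0 for b in blocks}
    succ = defaultdict(list)
    for driver, driven in feedthrough_edges:
        succ[driver].append(driven)
        indegree[driven] += 1
    ready = deque(b for b in blocks if indegree[b] == 0)  # arbitrary order allowed here
    order = []
    while ready:
        b = ready.popleft()
        order.append(b)
        for nxt in succ[b]:
            indegree[nxt] -= 1
            if indegree[nxt] == 0:
                ready.append(nxt)
    return order

# Example: a Gain block driven by an Inport, feeding a direct-feedthrough Sum block.
print(sorted_order(["Inport", "Gain", "Sum"], [("Inport", "Gain"), ("Gain", "Sum")]))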
Simulink Coder† supports code generation and offers a framework to execute the generated code in a real-time environment. Simulink Coder can generate code for periodic tasks, either using a single task or multiple tasks. Single-task implementations can preserve the semantics during the simulation because the generated code is invoked by a simple scheduler in a single thread without preemptions. For multi-task implementations, the generated code is invoked by a rate monotonic (RM) 24 scheduler in a multithreaded RTOS environment, where each task is assigned a priority and preemptions occur between tasks. As a consequence of preemption and scheduling, the implementation semantics can conflict with the model semantics in a multi-rate system.
184
+ 2.3. AUTOSAR
+ AUTOSAR is an open industry standard to meet the needs of future car development. AUTOSAR defines three main layers: the application, the runtime environment (RTE), and the basic software (BSW) layer 40. The functions in the application layer are implemented by SW-Cs, which encapsulate part or all of the automotive electronic functions, as shown in Figure 1. The components communicate via a Virtual Functional Bus (VFB), which is an abstraction of all the communication mechanisms of AUTOSAR. Engineers abstract the communication details of software components by employing VFBs. A set of runnables represents an SW-C's internal behavior, and a runnable is the smallest executable code that can be individually scheduled, either by a timer or by an event. Lastly, runnables are required to be mapped to a set of tasks for a target platform, and the mapping has to preserve ordering relations and causal dependencies. Simulink has supported AUTOSAR-compliant code generation since version R2006a‡. All AUTOSAR concepts can be represented by Simulink blocks, and existing Simulink blocks can easily be used in the AUTOSAR development process. The mapping between some AUTOSAR concepts and Simulink concepts is shown in Table 1 39.
+ Fig. 1. AUTOSAR components, interfaces, and runnables. (Adapted from 5)
+ † https://www.mathworks.com/products/simulink-coder.html
+ ‡ https://www.mathworks.com/products/simulink.html
+ Table 1. Examples of ML/SL and AUTOSAR Concepts Mapping.
+ ML/SL                      AUTOSAR
+ Subsystem                  Atomic Software Component
+ Function-call subsystem    Runnable
+ Function calls             RTEEvents
+ 2.4. Task model
+ In automotive software, Simulink models are often drawn from real-time specifications and are realized as a set of tasks running on an RTOS. In order to better test this kind of software in the MIL phase, model-based testing needs to be scaled to the real-time context, which requires a timed formalism to model the system under test in conformance with the real-time requirements. We define a task model to capture the timing properties of tasks in the Simulink environment, and the application is modeled as a set of periodic tasks.
+ A task model, T, is represented by a tuple {φ, ρ, c, γ, prect, precr, prio, jitter}, where φ is the offset of the task, ρ is the period of the task, c is the Worst Case Execution Time (WCET) of the task, γ is a list of runnables that belong to the task, prect is the precedence constraint of the task, precr is the precedence constraint of the runnables within the task, prio is the priority associated with the task, and jitter is the deviation of the task from its periodic release times. Every task has an implicit deadline, which means the deadline of a task is equal to ρ. An offset φ refers to the time delay between the arrival of the first instance of a periodic task and its release time. The WCET is the summation of the execution times of the task's runnables. A precedence constraint prect is a list of tasks that specifies the task execution order, and precr is a list of runnables within the task.
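+ As a rough illustration (not part of the paper's tooling), such a task record could be captured as a MATLAB struct, with one struct per task in T; the field names below are assumptions chosen to mirror the tuple:
+ % Illustrative sketch only: the task tuple {phi, rho, c, gamma, prect, precr, prio, jitter}
+ % written as a MATLAB struct; field names are assumptions, not SimSched's API.
+ task1.offset    = 0;           % phi: release offset in ms
+ task1.period    = 10;          % rho: period in ms (implicit deadline = period)
+ task1.wcet      = 3;           % c: WCET, the sum of the runnables' execution times, in ms
+ task1.runnables = {'R1'};      % gamma: runnables mapped to this task
+ task1.prect     = {};          % precedence constraint over tasks
+ task1.precr     = {};          % precedence constraint over runnables within the task
+ task1.priority  = 2;           % prio: static priority
+ task1.jitter    = 0;           % deviation from the periodic release times, in ms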
+ Fig. 2. Task states and transitions of the task model.
+ Figure 2 shows the task-state and transition diagram of the task model, which is based on OSEK's basic task-state model. The task model includes three states: suspended, ready, and running, and four transitions: Active, Start, Preempt, and Terminate. The transitions represent the actions to activate, start, preempt, or terminate a task.
+ Fig. 3. Task timing parameters shown in a Gantt chart (all related to Task2).
+ Figure 3 shows the timing parameters of a task model; different timing parameters can alter the application's real-time behavior within a system.
+ 3. Mutation Operators for Simulink Models
+ This section introduces a mutation analysis approach to validate the real-time context during simulation. Mutation operators are the key elements of mutation testing. Model-based mutation testing is a method of injecting faults into models to check whether the tests pass or fail, thus validating the software. The injected faults are the mutation operators.
+ Before we apply mutation operators to a model, we need to identify them, which requires understanding what kinds of errors can cause failures. We have proposed the following task-related mutation operators.
+ 3.1. Offset mutation operators
+ The task release offset is one of the factors that affect the computation result in terms of task interference. In order to take the offset into account for analysis, we introduce the offset mutation operators. For a known offset φ, a task now executes φ time units after the start of its period. The execution time of the task is unchanged at c time units before the next period starts.
+ 3.1.1. mITO
+ This operator adds δ time to the current offset. For a given task τi{φi, ρi, ci, γi, precti, precri, prioi, jitteri} ∈ T, this mutation operator changes the offset φi to φi + δ.
+ 3.1.2. mDTO
+ This operator subtracts δ time from the current offset. For a given task τi{φi, ρi, ci, γi, precti, precri, prioi, jitteri} ∈ T, this mutation operator changes the offset φi to φi − δ.
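+ A minimal sketch of how mITO and mDTO could be applied to such a task record (the struct fields and the delta value are illustrative assumptions):
+ % Illustrative sketch of the offset mutation operators (not SimSched's actual code).
+ delta = 3;                                    % mutation step in ms, chosen by the tester
+ mITO_mutant = task1;
+ mITO_mutant.offset = task1.offset + delta;    % mITO: phi_i -> phi_i + delta
+ mDTO_mutant = task1;
+ mDTO_mutant.offset = task1.offset - delta;    % mDTO: phi_i -> phi_i - delta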
+ 3.2. Period mutation operators
+ An RTOS usually applies a preemptive multitasking scheduling algorithm to determine the execution order of tasks, and the most commonly used algorithm is fixed-priority scheduling (FPS). The algorithm assigns each task a static priority level, and the RTOS scheduler executes the highest-priority task from the ready task queue. Simulink Coder supports an RM scheduler, where the priority of a task is associated with its period: if a task has a smaller period, then it has a higher priority. Furthermore, a lower-priority task can be preempted by a higher-priority task during execution.
+ 3.2.1. mITPER
+ This operator increases the period of a task, which changes the task to a slower rate. For a given task τi{φi, ρi, ci, γi, precti, precri, prioi, jitteri} ∈ T, this mutation operator changes the period of the task to ρi + δ.
+ 3.2.2. mDTPER
+ This operator decreases the period of a task, which changes the task to a faster rate. For a given task τi{φi, ρi, ci, γi, precti, precri, prioi, jitteri} ∈ T, this mutation operator changes the period of the task to ρi − δ.
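+ Sketched in the same illustrative style as before, the period operators simply shift ρ; under the RM assumption described at the start of this subsection, a changed period also implicitly changes the task's relative priority:
+ % Illustrative sketch of the period mutation operators (not SimSched's actual code).
+ delta = 1;
+ mITPER_mutant = task1;
+ mITPER_mutant.period = task1.period + delta;  % mITPER: rho_i -> rho_i + delta (slower rate)
+ mDTPER_mutant = task1;
+ mDTPER_mutant.period = task1.period - delta;  % mDTPER: rho_i -> rho_i - delta (faster rate)
+ % Under rate-monotonic scheduling, a smaller period implies a higher priority,
+ % so period mutants can also change which task preempts which.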
+ 3.3. Execution time mutation operators
+ The correctness of a real-time system is determined on one hand by the computation results of the logical program and, on the other hand, is strictly related to the time at which the results are produced. Hence, execution time analysis is essential during the process of designing and verifying embedded systems. For this reason, we propose execution time operators, which can adjust the execution time of each task at the runnable level to simulate the run-time execution on different processor speeds. A longer execution time of a task may lead to a scenario where a lower-rate task blocks a higher-rate task so that it misses its deadline.
+ 3.3.1. mITET
+ This operator adds δ time to the current execution time of each runnable, which increases the total execution time. For a given task τi{φi, ρi, ci, γi, precti, precri, prioi, jitteri} ∈ T, this mutation operator changes the execution time ci to ci + δ.
+ 3.3.2. mDTET
+ This operator subtracts δ time from the current execution time of each runnable, which decreases the total execution time. For a given task τi{φi, ρi, ci, γi, precti, precri, prioi, jitteri} ∈ T, this mutation operator changes the execution time ci to ci − δ.
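+ Because the operators act at the runnable level, an illustrative sketch keeps a per-runnable execution-time vector and recomputes the task WCET (field names are assumptions, not SimSched's API):
+ % Illustrative sketch of the execution time mutation operators (not SimSched's actual code).
+ delta = 4;
+ task2.runnables = {'R2', 'R3'};
+ task2.exec      = [3, 3];                     % per-runnable execution times in ms
+ mITET_mutant = task2;
+ mITET_mutant.exec(1) = task2.exec(1) + delta; % mITET: lengthen one runnable by delta
+ mITET_mutant.wcet = sum(mITET_mutant.exec);   % c_i -> c_i + delta
+ mDTET_mutant = task2;
+ mDTET_mutant.exec(1) = task2.exec(1) - delta; % mDTET: shorten one runnable by delta
+ mDTET_mutant.wcet = sum(mDTET_mutant.exec);   % c_i -> c_i - delta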
+ 3.4. Execution precedence mutation operators
+ The RTOS scheduler selects tasks to execute according to the priority level of the task. However, the spawn order determines the execution order of tasks with the same priority: whichever task is spawned first becomes ready first and gets the CPU first. This results in situations where a pair of tasks has a precedence relation in the implementation that does not exist in the design phase, or loses an existing precedence relation in the implementation. The incorrect precedence can cause a wrong execution order of tasks. Hence, we propose the precedence mutation operators, which can specify a precedence relation between a pair of tasks or runnables. These operators create mutants by assigning a specific execution order to a set of tasks or runnables to reflect the precedence relation.
+ 3.4.1. mATPREC
+ For a given task τi{φi, ρi, ci, γi, precti, precri, prioi, jitteri} ∈ T, for each task τj ∈ T (j ≠ i), if τj ∉ precti, this mutation operator adds τj to precti.
+ 3.4.2. mRTPREC
+ For a given task τi{φi, ρi, ci, γi, precti, precri, prioi, jitteri} ∈ T, for each task τj ∈ T (j ≠ i), if τj ∈ precti, this mutation operator removes τj from precti.
+ 3.4.3. mARPREC
+ For a given task τi{φi, ρi, ci, γi, precti, precri, prioi, jitteri} ∈ T, for each runnable γim ∈ τi, if γim ∉ precri, this mutation operator adds γim to precri.
+ 3.4.4. mRRPREC
+ For a given task τi{φi, ρi, ci, γi, precti, precri, prioi, jitteri} ∈ T, for each runnable γim ∈ τi, if γim ∈ precri, this mutation operator removes γim from precri.
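+ An illustrative sketch of the task-level precedence operators, treating prect as a cell array of task names (an assumption about the representation, not SimSched's API):
+ % Illustrative sketch of the task precedence mutation operators.
+ task2.prect = {};                               % no task-level precedence initially
+ mATPREC_mutant = task2;
+ mATPREC_mutant.prect{end+1} = 'T3';             % mATPREC: add tau_j to prect_i
+ mRTPREC_mutant = task2;
+ mRTPREC_mutant.prect = setdiff(mATPREC_mutant.prect, {'T3'});  % mRTPREC: remove tau_j
+ % mARPREC and mRRPREC manipulate the runnable-level list precr in the same way.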
+ 3.5. Priority mutation operators
+ In an RTOS, each task is assigned a relative priority, which is a static integer that identifies the degree of importance of the task. The highest-priority task always gets the CPU when it becomes ready to run. The most common RTOS scheduling algorithm is preemptive scheduling.
+ 3.5.1. mITPRI
+ This operator increases the priority of a task. For a given task τi{φi, ρi, ci, γi, precti, precri, prioi, jitteri} ∈ T, this mutation operator changes the priority of the task prioi to prioi + δ.
+ 3.5.2. mDTPRI
+ This operator decreases the priority of a task. For a given task τi{φi, ρi, ci, γi, precti, precri, prioi, jitteri} ∈ T, this mutation operator changes the priority of the task prioi to prioi − δ.
+ 3.6. Jitter mutation operators
+ Timing jitter exists in an RTOS; it is the delay between subsequent release times of a given task.
+ 3.6.1. mITJ
+ This operator increases the jitter time of a task. For a given task τi{φi, ρi, ci, γi, precti, precri, prioi, jitteri} ∈ T, this mutation operator changes the jitter of the task jitteri to jitteri + δ.
+ 3.6.2. mDTJ
+ This operator decreases the jitter time of a task. For a given task τi{φi, ρi, ci, γi, precti, precri, prioi, jitteri} ∈ T, this mutation operator changes the jitter of the task jitteri to jitteri − δ.
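+ The priority and jitter operators follow the same pattern as the earlier sketches; for completeness, an illustrative version:
+ % Illustrative sketch of the priority and jitter mutation operators.
+ delta = 1;
+ mITPRI_mutant = task1;  mITPRI_mutant.priority = task1.priority + delta;  % mITPRI
+ mDTPRI_mutant = task1;  mDTPRI_mutant.priority = task1.priority - delta;  % mDTPRI
+ mITJ_mutant   = task1;  mITJ_mutant.jitter     = task1.jitter + delta;    % mITJ
+ mDTJ_mutant   = task1;  mDTJ_mutant.jitter     = task1.jitter - delta;    % mDTJ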
+ 3.7. Shared memory mutation operators
+ It is common for RTOS tasks to exchange data or information via shared memory (e.g., a global variable, a memory buffer, or a hardware register). Shared memory can easily cause access conflicts if the logical software design neglects any corner case. Here we introduce a set of shared-memory mutation operators.
+ 3.7.1. mDSM
+ This operator defines a new value for a global variable in a task. If a task reads this global variable, then we define a new value right before the read occurs.
+ 3.7.2. mUDSM
+ This operator un-defines a global variable in a task. If a task writes this global variable, then this write operation is ignored.
+ 3.7.3. mRDSM
+ This operator removes the definition of a global variable. If a global variable is initialized in a task, then it is not initialized.
+ 3.7.4. mRSM
+ This operator adds a reference to a global variable.
+ 3.7.5. mRMSMR
+ This operator removes a reference to a global variable.
+ 3.7.6. mRSMR
+ This operator replaces a reference to a global variable with a reference to a different global variable.
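+ The shared-memory operators act on the model structure rather than on the task tuple. As a hedged sketch, mutants of this kind could be injected with the standard Simulink model-construction API; the model and block names below are placeholders, not the paper's artifacts:
+ % Illustrative sketch only: injecting shared-memory mutants with the Simulink API.
+ load_system('exampleModel');
+ % mDSM: define a new value for data store A right before an existing read.
+ add_block('simulink/Sources/Constant', 'exampleModel/Runnable2/MutConst', 'Value', '0');
+ add_block('simulink/Signal Routing/Data Store Write', ...
+           'exampleModel/Runnable2/MutWrite', 'DataStoreName', 'A');
+ add_line('exampleModel/Runnable2', 'MutConst/1', 'MutWrite/1');
+ % mUDSM / mRMSMR: ignore a write or drop a read by deleting the corresponding block.
+ % delete_block('exampleModel/Runnable2/Data Store Write');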
+ Table 2. Simulink Mutation Operators
+ Mutation Key    Title
+ mITO            Increase Task Offset
+ mDTO            Decrease Task Offset
+ mITPER          Increase Task Period
+ mDTPER          Decrease Task Period
+ mITET           Increase Task Execution Time
+ mDTET           Decrease Task Execution Time
+ mATPREC         Add Task Precedence
+ mRTPREC         Remove Task Precedence
+ mARPREC         Add Runnable Precedence
+ mRRPREC         Remove Runnable Precedence
+ mITPRI          Increase Task Priority
+ mDTPRI          Decrease Task Priority
+ mITJ            Increase Task Jitter
+ mDTJ            Decrease Task Jitter
+ mDSM            Define Shared Memory
+ mUDSM           Un-define Shared Memory
+ mRDSM           Remove Definition of Shared Memory
+ mRSM            Reference a Shared Memory
+ mRMSMR          Remove a Shared Memory Reference
+ mRSMR           Replace a Shared Memory Reference
+ 4. Mutation operators demonstration
+ We have introduced twenty mutation operators categorized into seven classes and explained each mutation class. The mutation operators are summarized in Table 2. We use simple examples to demonstrate the use of each mutation operator. To demonstrate our mutation operators, we use the tool SimSched to alter the properties of software applications realized as Simulink models. From Table 1, we know that each function-call subsystem represents an AUTOSAR runnable. A function-call subsystem is executed conditionally when a function-call event signal arrives, and both an S-function block and a Stateflow block can provide such a function-call event. SimSched applies this function-call invocation mechanism, using an S-function to generate a function-call event that schedules each runnable (function-call subsystem). Figure 4 shows the SimSched parameters dialogue, which we can use to adjust the timing properties and implement the mutation operators for Simulink models.
+ Fig. 4. SimSched parameter setting dialogue.
+ In this section, we use several simple examples to exhibit the mutants generated by our mutation operators. Figure 5 illustrates the use of SimSched to schedule a Simulink model. In this example, SimSched is at the top left corner, and it schedules three runnables (R1, R2, R3) that are mapped to two tasks (T1, T2). Runnable R1 is mapped to T1; the period of T1 is 10 ms, its priority is 2, and the execution time of R1 is 3 ms. R2 and R3 are mapped to T2; the period of T2 is 20 ms, its priority is 1, and the execution times of R2 and R3 are 3 ms and 3 ms, respectively. The detailed parameter settings are listed in Table 3. There is a Data Store Memory block in this example, named A, which defines a shared data store, a memory area used by Data Store Read and Data Store Write blocks with the same data store name. R1 writes a constant value to the global variable A. R2 first reads A and then writes the sum of A and its delayed value to A. R3 reads A, subtracts its delayed value from A, and outputs the result.
+ Table 3. The simple example settings
+ Task    Period (ms)    Execution Time (ms)    Priority    Runnable
+ T1      10             3                      2           R1
+ T2      20             3                      1           R2
+ T2      20             3                      1           R3
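+ These settings correspond to the vectors entered in the SimSched parameter dialogue of Figure 4. As a rough sketch (the variable names are illustrative; SimSched reads the values from its mask dialogue), the configuration amounts to:
+ % Illustrative transcription of the SimSched settings from Figure 4 / Table 3.
+ cfg.Task          = [1, 2];            % task identifiers
+ cfg.Priority      = [2, 1];            % per-task priorities
+ cfg.Period        = [10, 20];          % per-task periods in ms
+ cfg.Runnable      = {'R1', 'R2', 'R3'};
+ cfg.TaskMapping   = [1, 2, 2];         % runnable-to-task mapping
+ cfg.ExecutionTime = [3, 3, 3];         % per-runnable execution times in ms
+ cfg.Offset        = [0, 0, 0];
+ cfg.Jitter        = [0, 0, 0];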
+ Fig. 5. A simple example of using SimSched to schedule AUTOSAR SW-Cs.
+ Fig. 6. Task executions Gantt chart of the running example.
+ Fig. 7. A simple example of using Stateflow to schedule AUTOSAR SW-Cs.
+ Fig. 8. Simple example output of the Stateflow scheduler simulation.
+ We use a Stateflow scheduler version of the simple example, shown in Figure 7, to show the typical Stateflow scheduler simulation result and then compare it with the SimSched scheduler simulation result. The task parameters are the same as shown in Table 3, and we apply the same task configurations to both the Stateflow scheduler and SimSched models for simulation. Figure 8 shows the Stateflow scheduler simulation result, and Figure 9 shows the SimSched simulation result. The result figures show the output value of each runnable. From the figures, we can see that R1, R2, and R3 are all executed at time 0 in Figure 8; in Figure 9, R1 is executed at time 0, R2 is executed at 3 ms, and R3 is executed at 6 ms. R2 and R3 are executed later than in the Stateflow scheduler simulation of Figure 8. This is because SimSched takes execution time into account, and each task must wait until the previous task is completed on a single-core platform.
+ Fig. 9. Simple example output of the SimSched simulation without applying any mutation operator.
+ 4.1. Offset mutation operators
+ We first apply the mITO mutation operator to the running example; say we increase the offset by δ1 = 3 ms for T1. We then obtain the task execution timeline in Figure 10. We can see that T2 is executed first at time 0, and T1 preempts T2 at 3 ms in the first period due to the offset effect. After the first period, there is no preemption between T1 and T2. Then, we apply the mDTO mutation operator based on the previous settings. We set δ1 = −1 ms for T1, so the offset of T1 is 2 ms. Figure 11 shows the task execution timeline: T2 is preempted by T1 during the execution of the first period. Compared to the task execution Gantt chart of our running example with no offset, shown in Figure 6, we can clearly see the preemption effect.
+ Fig. 10. Task executions Gantt chart of the running example after the increase offset mutation operator is applied.
+ Fig. 11. Task executions Gantt chart of the running example after the decrease offset mutation operator is applied.
+ The running example's output after applying the mITO mutation operator is shown in Figure 12, which is different from Figure 9. Because T1 preempts T2 at the first instance execution, the output of R3 goes from zero to ten, then back to zero, and then up again, instead of always increasing. The running example's output after applying the mDTO mutation operator is the same as Figure 9, because the offset operator only affects the initial execution of each task, and the preemption occurs before the first execution of the R2 instance completes. Inside our model scheduler program, we trigger each subsystem at the end of its execution time slot. Technically, the execution order of this example is still R1, R2, and R3, so the output of the simulation stays the same.
+ Fig. 12. Simple example output of the SimSched simulation after applying the mITO mutation operator.
+ TimeJ. Chen et al. / Mutation Operators for Simulink Models
921
+ 4.2.
922
+ Period mutation operators
923
+ We apply the mITPER operator to this example to
924
+ increase the period of a task. We set δ1 = 1ms to T1
925
+ so the period of the task T1 is 11ms now. Figure 13
926
+ shows that T2 is preempted at the time of 22ms, and
927
+ the simulation yields a wrong result due to this pre-
928
+ emption shown in Figure 14. The output of R3 is an
929
+ alternating value instead of an increasing value.
930
+ Fig. 13. Task executions Gantt chart of the running example
931
+ after increase period mutation operator is applied.
932
+ Fig. 14. Simple example output of SimSched simulation after
933
+ applying mITPER mutation operator.
934
+ We apply the mDTPER operator to this example
935
+ to decrease the period of a task. We set δ1 = −4ms
936
+ to T1 so the period of the task T1 is 6ms now. Then,
937
+ we run the simulation, T2 is preempted by T1 shown
938
+ in Figure 15 and it yields a wrong simulation result
939
+ shown in Figure 16. The output of R3 is either zero
940
+ or ten instead of an increasing value.
941
+ Fig. 15. Task executions Gantt chart of the running example
942
+ after decreasing period mutation operator is applied.
943
+ Fig. 16. Simple example output of SimSched simulation
944
+ after applying mDTPER mutation operator.
945
+ 4.3. Execution time mutation operators
+ We apply the mITET operator to this example to increase the execution time of a task. We can specify any runnable within a task and increase its execution time. For example, we set δ1 = 4 ms for R2 in T2, so the execution time of R2 is 7 ms and T2 now takes 10 ms to execute. Figure 17 shows that T2 is preempted at the time of 10 ms, and the simulation yields a wrong result due to this preemption. The wrong result is the same as in the example of decreasing the task period. We apply the mDTET operator to this example to decrease the execution time of a task. We set δ1 = −1 ms for T1, so the execution time of task T1 is now 2 ms. Then we run the simulation: no preemption occurs between these two tasks, and the output is as expected, the same as the original model.
+ Fig. 17. Task executions Gantt chart of the running example after the increase execution time mutation operator is applied.
+ 4.4. Execution precedence mutation operators
+ We introduce a second example, shown in Table 4, to explain the mATPREC and mRTPREC operators. Figure 18 shows the task execution Gantt chart of this example. From the task execution chart, we can see that the execution order of the tasks is T1, T2, T1, T3.
+ Table 4. The simple example settings
+ Task    Period (ms)    Execution Time (ms)    Priority    Runnable
+ T1      5              1                      3           R1
+ T2      10             4                      2           R2
+ T3      10             3                      1           R3
+ Fig. 18. Task executions Gantt chart of example 2.
+ First, we assume there is no precedence relation among the tasks, so we use the mATPREC mutation operator to add a precedence relation τ3 to prect2, which specifies that a new instance of T2 cannot start unless T3 has executed after the last instance of T2. Hence, we set the execution order so that T3 is executed before T2 in the settings dialogue. Figure 19 shows the execution result: T2 is preempted by T1. If T2 is not a re-entrant function, then this preemption may cause a potential execution failure.
+ Fig. 19. Task executions Gantt chart of example 2 after the task precedence mutation operator is applied.
+ Then, we assume there is a precedence relation between T1 and T3, and the task execution diagram is the same as in Figure 18. We apply the mRTPREC mutation operator to remove the precedence relation prect3 from τ1. The result is the same as shown in Figure 19.
+ We add one runnable, R4, to the first example and assign it to T1. This new task configuration is shown in Table 5. R4 writes a constant value, different from R1's, to the global variable A. We apply the mARPREC mutation operator to this new example, which adds γ1 to precr4. R4 requires R1 to execute first, so R4 overwrites the value written by R1. The operator changes the execution order of the runnables.
+ Table 5. Task configuration settings for runnable precedence mutation operators.
+ Task    Period (ms)    Execution Time (ms)    Priority    Runnable
+ T1      10             2                      2           R1
+ T2      20             2                      1           R2
+ T3      20             2                      1           R3
+ T1      10             2                      1           R4
+ In example one, T2 has two runnables, R2 and R3, with a precedence relation between them. We apply the mRRPREC runnable precedence removal mutation operator to remove the precedence γ2 from precr3. Without the precedence constraint, we schedule R3 to run before R2, which turns out differently than the original simulation: the original output of R3 is a value that increases along with the execution, whereas the mutant's output is either zero or a fixed value. The reason is that R3 executes first and reads A before R2 writes any new value to A. The bottom output line in Figure 20 shows the execution result.
+ Fig. 20. The outputs of example one's three runnables.
+ 4.5. Priority mutation operators
+ Table 6. Priority mutation operator example settings
+ Task    Period (ms)    Execution Time (ms)    Priority    Runnable
+ T1      10             1                      4           R1
+ T2      10             2                      3           R2
+ T3      10             3                      2           R3
+ We apply the mITPRI operator to the example in Table 6 to increase the priority of T3. This mutation operator changes the priority prio3 to prio3 + 3, so T3 has the highest priority, 5, in this example, which results in T3 being executed first. Figure 21 shows that T3 is triggered first in the task execution Gantt chart. This mutation alters the task execution order.
+ 0.1J. Chen et al. / Mutation Operators for Simulink Models
1339
+ Fig. 21. Task executions Gantt chart after applying increas-
1340
+ ing task priority mutation operator.
1341
+ We apply mDTPRI operator to decrease the pri-
1342
+ ority of T1. This mutation operator changes the pri-
1343
+ ority of prioi to proi−3 so the T1 has the lowest pri-
1344
+ ority 1 in this example, which results in T1 being
1345
+ executed at last. The task execution Gantt chart is
1346
+ shown in Figure 22.
1347
+ Fig. 22. Task executions Gantt chart after applying decreas-
1348
+ ing task priority mutation operator.
1349
+ 4.6. Jitter mutation operators
+ We apply the mITJ operator to increase the jitter time of a task. For example, let δ = 2; this mutation operator changes the real release time of the task so that jitter1 = 0 + 2. Figure 23 shows that the execution of T2 is preempted by T1, caused by the jitter.
+ Fig. 23. Task executions Gantt chart after applying the increase jitter mutation operator.
+ The mDTJ mutation operator then decreases the jitter time of a task. We apply this operator to the above example and let δ = −1, so the task jitter1 = 2 − 1. T2 is preempted by T1 during the simulation phase.
+ 4.7. Shared memory mutation operators
+ In this shared memory category, we demonstrate five mutation operators. The first one is mDSM; this operator assigns a new value to the memory store before a read. For our example, we add a Data Store Write block right before the Data Store Read executes, so that the Data Store Write block defines a new value for the variable; we chose the initial value of this variable as the default new value. The mutant generated by the mDSM operator is shown in Figure 24, which only shows the changes to Runnable2_subsystem. We add a Constant block and a Data Store Write block at the top left corner.
+ Fig. 24. A simple example of a DSM mutant.
+ The second mutation operator is mUDSM; this operator disregards a write to a Data Store block. For our example, we remove the Data Store Write block. Figure 25 shows the mUDSM mutant in which the Data Store Write has been removed.
+ Fig. 25. A simple example of a UDSM mutant.
+ The third mutation operator is mRDSM; this operator removes the initialization value of a Data Store memory.
+ Z.J. Chen et al. / Mutation Operators for Simulink Models
1476
+ an initial value before they can use properly. For
1477
+ our example, Runnable1 subsystem is such a pro-
1478
+ cess of initializing Data Store A; then, we remove
1479
+ the Data Store Write block in Runnable1 subsystem.
1480
+ The Simulink model can still run simulations with-
1481
+ out any issues however the output of the simulation
1482
+ only yields a single value.
1483
+ Mutant operator mRSM adds a new reference to
1484
+ shared memory. Figure 26 shows the block diagrams
1485
+ of a Simulink model with three subsystems and they
1486
+ are mapped to two tasks. The model has a DSM
1487
+ block A in the root-level system. There is a Data
1488
+ Store Write block inside subsystems Task B1 and a
1489
+ Data Store Read block in Task B2. The period of
1490
+ Task A is 5ms and the period of Task B is 10ms.
1491
+ To implement the mRSM, we add a Data Store Read
1492
+ block to the TaskA subsystem which shows in Figure
1493
+ 27. In the original example, Task A executes first
1494
+ then Task B1 writes A and Task B2 reads A. The
1495
+ mutant program has the same execution order as the
1496
+ original model. However, when the Data Store Read
1497
+ block in Task A executes, the block reads data from
1498
+ an uninitialized data store or a previous instant of
1499
+ Task B1 as Task B has not executed yet or has been
1500
+ executed previously.
1501
+ Fig. 26. A simple Simulink model.
1502
+ Fig. 27. An example of mRSM mutant operator. Adding a
1503
+ Data Store Read block to Task A block.
1504
+ Mutant operator mRMSMR deletes a reference to
1505
+ shared memory. In Figure 26, Task B2 has a refer-
1506
+ ence to a DSM block A in the root-level system. To
1507
+ implement the mRMSMR, we delete the Data Store
1508
+ Read block in the TaskB2 subsystem. In the mu-
1509
+ tant program, Task B2 has a constant output value
1510
+ of zero since there is no reference.
1511
+ 5. Evaluation Phase
+ In the previous section, we described how the model scheduler SimSched can validate the real-time context during a simulation, and we utilize mutation testing to evaluate SimSched. In this section, we perform experiments to demonstrate the use of our mutation testing framework to evaluate the quality of the SimSched and Stateflow schedulers in scheduling tasks in real-time systems.
+ 5.1. Evaluation Process
+ To validate the proposed mutation operators, we apply them to ML/SL models. We separate the evaluation process into two parts, base and extension, according to the abilities of ML/SL. We apply first-order mutants (FOMs) 22 to ML/SL models to generate mutants, which means we generate each mutant by using a mutation operator only once.
+ 5.1.1. Base Case
+ In the base case, we examine the simulation results of the original models, the SimSched models, and their mutants. An original model M is an ML/SL model scheduled by the Stateflow scheduler; a SimSched model M′ is an original model scheduled by SimSched; the mutants (Mµ or M′µ) are either original models or SimSched models mutated by one of our mutation operators. Figure 28 shows the schematic diagram of our mutant generation process. We use the simulation result of M as a comparison baseline, and then we compare the baseline with every other simulation result of Mµ and M′µ. We examine the comparison result to see if it reaches a verdict of failure during model simulation. We say a mutant is killed if a verdict of failure is reached.
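+ As an illustrative sketch of this kill criterion (the model names and logging setup are assumptions, not the actual evaluation scripts), the baseline and mutant outputs can be compared after simulation:
+ % Illustrative sketch of the base-case kill check (not the paper's actual scripts).
+ baseOut   = sim('originalModel',  'ReturnWorkspaceOutputs', 'on');  % simulation of M
+ mutantOut = sim('simschedMutant', 'ReturnWorkspaceOutputs', 'on');  % simulation of M'_mu
+ % Assume each model logs the runnable outputs to 'yout' with identical formats.
+ killed = ~isequal(baseOut.get('yout'), mutantOut.get('yout'));
+ if killed
+     disp('Mutant killed: simulation result deviates from the baseline.');
+ end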
+ Fig. 28. Schematic diagram of the model mutants generation process.
+ Fig. 29. Simple evaluation example scheduled by the Stateflow scheduler.
+ We use the simple example shown in Figure 29 to explain the base case evaluation process. This example is an original model. We replace the Stateflow scheduler with a SimSched scheduler to form a SimSched model. We generate mutants for both the original and SimSched models with a specific mutation operator, e.g., mDTPER to decrease the task period. Then we run the simulation for both mutants and analyze the results to see if there are any errors. If the simulation result of Mµ or M′µ is different from the original model and shows a verdict of failure, then we say the mutant is killed.
+ In this example, we have three runnables R1, R2, R3, and they are mapped to two tasks T1, T2. R1 is mapped to T1, and R2, R3 are mapped to T2. The period of T1 is 3 ms and the period of T2 is 6 ms. The execution time of each runnable is 1 ms. The simulation result of M is shown in Figure 30; it shows that each runnable's output is a rising, non-interlaced polyline. We apply the mDTPER mutation operator, decreasing the period by 1 ms, to both the original model and the SimSched model to generate mutants. Task T1 in the mutants has a period of 2 ms. The results of these simulations are shown in Figure 31 and Figure 32. The simulation result of M′µ is different from the result of M: the outputs of R2 and R3 are two rising, interlaced polylines, because SimSched can simulate the execution time and preemption. T1 preempts T2 in the SimSched mutant model, yielding an alternative execution trace, and we say a verdict of failure is reached. However, the simulation result of Mµ is similar to the result of M. Thus, the mDTPER mutant is killed with respect to M′µ and is alive with respect to Mµ. We cannot apply this method to all mutation operators due to the nature of ML/SL; we combine this method and the following method to evaluate the mutation operators.
+ Fig. 30. M simulation result.
+ Fig. 31. Mµ simulation result.
+ Fig. 32. M′µ simulation result.
+ 5.1.2. Extension
+ To evaluate the rest of the mutation operators, we implement a mutation generator with additional functionalities to assist the validation process. One feature is to check the mutant model's schedulability for the given task configuration, to decide whether all task deadlines are met.
+ Offset=0J. Chen et al. / Mutation Operators for Simulink Models
1693
+ check the data access sequence. If there is a DataS-
1694
+ tore block in the mutated model, every read or write
1695
+ to this DataStore block is recorded. Then we use
1696
+ this mutated model data access sequence to com-
1697
+ pare with the original model data access sequence.
1698
+ The mutation generator is implemented as a Matlab
1699
+ script written in m-file.
1700
+ The validation process takes a Stateflow sched-
1701
+ uled ML/SL model and a test specification as input.
1702
+ The test specification specifies which mutation op-
1703
+ erator to use. A mutant generator applies the speci-
1704
+ fied mutation operator to the ML/SL model via Sim-
1705
+ Sched and generates a mutant. The mutant genera-
1706
+ tor then executes the simulation both for the original
1707
+ model and the mutated model using the additional
1708
+ functionalities to analyze the simulation. If the anal-
1709
+ ysis shows at least one task misses its deadline in a
1710
+ mutated model, then we say a mutant is killed. Or
1711
+ at least one variable comparison result of the DataS-
1712
+ tore access sequence is unmatching, and then we say
1713
+ a mutant is killed; otherwise, we report the mutant
1714
+ is benign.
1715
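+ A compact sketch of the two extension checks described above (the data structures are assumptions about what the mutation generator records during simulation, not the actual m-file):
+ % Illustrative sketch of the extension kill checks.
+ % responseTimes and deadlines are assumed vectors with one entry per task instance,
+ % recorded by the mutation generator during the simulation of the mutated model.
+ deadlineMiss = any(responseTimes > deadlines);
+ % accessSeqOrig and accessSeqMut are the recorded DataStore access strings, e.g. 'WRWR'.
+ seqMismatch  = ~strcmp(accessSeqOrig, accessSeqMut);
+ killed = deadlineMiss || seqMismatch;            % otherwise the mutant is reported as benign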
+ Fig. 33. A simple example of using the model scheduler to schedule AUTOSAR SW-Cs.
+ We use the example shown in Figure 33 to explain the validation process. It has three runnables mapped to two tasks: R1 maps to T1, and R2 and R3 map to T2. The period of task T1 is 10 ms, the period of T2 is 20 ms, and every runnable's execution time is 3 ms. There is a DataStore block named A as a shared variable in this example model. If we apply the period mutation operator mDTPER, ρi − δ, where i = 1 and δ = 6, to this model to decrease the period of T1, generate a mutant, and run it, the analysis result shows that T2 misses its deadline, and we say this mutant is killed. If we apply the execution time mutation operator mITET, ci + δ, where i = 1 and δ = 3, to this model to increase the execution time of T1 and generate a mutant, the DataStore access sequence of the original model is a pattern WRWR, where W represents a write to the shared variable and R represents a read of the shared variable. The mutant generates a different sequence, WRWWR. This is because T1 has a longer execution time than in the original model, and it preempts T2 during the execution of T2. Hence, there is one more W in the DataStore access sequence.
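+ For this concrete case, the recorded sequences make the verdict immediate; as a small usage illustration of the check sketched above:
+ % Usage illustration with the sequences from this example.
+ accessSeqOrig = 'WRWR';
+ accessSeqMut  = 'WRWWR';
+ killed = ~strcmp(accessSeqOrig, accessSeqMut);   % true: the extra write kills the mutant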
+ 5.2. Experiments
+ We employ two examples to demonstrate the use of our mutation testing framework. We first explain the two examples in detail. We then apply the mutation operators to the two models scheduled by both the Stateflow scheduler and SimSched.
+ Fig. 34. The three-servo example adapted from 12 with the Stateflow scheduler.
+ 5.2.1. Three Servos Model
+ We adapt an example from the TrueTime 21 example library, which shows a possible implementation of a three-servo PID control system. The example is shown in Figure 34 with a Stateflow scheduler. In this example, three DC servos are modeled by a continuous-time system, and three PID controllers are implemented as three subsystems. We map the three controller subsystems to three runnables R1, R2, and R3, which are then mapped to tasks T1, T2, and T3. The task periods are T1 = 4, T2 = 5, and T3 = 6 ms, respectively.
+ Each task has the same execution time of 1 ms. Task settings are shown in Table 7. The simulation result based on the above task settings is shown in Figure 35. The three graphs show the outputs of the motors using the three PID controllers when the corresponding task parameters are assigned accordingly. In each graph, the square wave is the reference input signal for the motor, where the computation delays are not taken into account. All three PID controllers produce smooth output signals, as expected.
+ Table 7. Three Servo example settings.
+ Task    Period (ms)    Execution Time (ms)    Priority    Runnable
+ T1      4              1                      3           R1
+ T2      5              1                      2           R2
+ T3      6              1                      1           R3
+ Fig. 35. The three servos example output with the Stateflow scheduler.
+ We replace the Stateflow scheduler with the SimSched scheduler, and the updated example is shown in Figure 36. In this example, the three DC servos have the same task settings as in the Stateflow scheduler example, and each runnable has the same execution time of 1 ms. The simulation result is the same as for the Stateflow scheduler example, based on the above task settings. No task misses its deadline, so the simulation result shows that every task achieves smooth control. Figure 37 shows the task active chart generated by SimSched; every task has been executed within its own deadline.
+ Fig. 36. The three servos example adapted from 12.
+ Fig. 37. The three servos example task active chart generated by SimSched.
+ Fig. 38. The adjusted Stateflow scheduler for the mITO mutation operator to increase the offset by 1 ms for DCServo1.
+ Rate6msJ. Chen et al. / Mutation Operators for Simulink Models
1976
+ Next step, we apply mITO, mDTO, mITPER,
1977
+ mDTPER, mARPREC, mRRPREC, mITJ, mDTJ
1978
+ mutation operators to both Stateflow scheduler and
1979
+ SimShced examples to generate two versions of mu-
1980
+ tants with the same mutation operators.
1981
+ To ap-
1982
+ ply some of the mutation operators to evaluate the
1983
+ Stateflow scheduler, we need to adjust the State-
1984
+ flow scheduler so that it can be used on the gen-
1985
+ erated mutants. Figure 38 shows an example that
1986
+ is adjusted for the Offset mutation operator. This
1987
+ example uses a temporal logic operator at in the
1988
+ state to set the Offset parameter to generate a mu-
1989
+ tant for PID1 which runs at the period of 4ms in
1990
+ this example. This mutant increases Offset as 1ms
1991
+ for DCServo1 controlled by PID1. The mutant of
1992
+ the SimSched version can be easily generated by our
1993
+ model scheduler SimSiched.
1994
+ Fig. 39. The Stateflow scheduled three-servo example task
1995
+ active chart after applying mITO mutation operator to in-
1996
+ crease o f fset as 1ms for DCServo1.
1997
+ We run simulations for both versions of the mutants generated by the offset mutation operator. Both mutant versions' outputs for the three servos are the same as shown in Figure 35. The only difference occurs at the beginning of the simulation, but it does not affect the smooth control of the DC servos. We can see the difference in the following comparison. Figure 39 shows the Stateflow scheduled task active chart after applying the mITO mutation operator to increase the offset by 1 ms for DCServo1. Before applying the mutation operator, every task is released at time 0. After applying the offset mutation operator, Task 1 is delayed by 1 ms, as shown at the top of the figure, while Task 2 and Task 3 are both released at time 0. Figure 40 shows the SimSched scheduled three servos example task active chart after applying the mITO mutation operator to increase the offset by 1 ms for DCServo1. There are three output signals representing, from top to bottom, tasks T1, T2, and T3. As T1 has a 1 ms offset, Task 2 is executed first, as shown in the figure: the second line starts at time 0. Because the SimSched scheduler has the execution time parameter, Task 2 is executed at time 1 and Task 3 at time 2, respectively.
+ Fig. 40. The SimSched scheduled three servos example task active chart after applying the mITO mutation operator to increase the offset by 1 ms for DCServo1.
+ We use a similar approach to apply the period mutation operators to the three-servo example and generate mutants for both the Stateflow scheduler model and the SimSched model. We use two mutation configurations to show the similarities and differences between the two schedulers. The first configuration is [1, 5, 6]: Task 1 has a period of 1 ms, while the periods of Task 2 and Task 3 remain 5 ms and 6 ms, respectively. Figure 41 shows the Stateflow scheduler mutant simulation result. Because Task 1 has a 1 ms period, it performs too many computations, and its output value goes out of the chart; Task 2 and Task 3 keep the same output as before. Figure 42 shows the SimSched mutant simulation result. The output of Task 1 is the same as for the Stateflow scheduler mutant. However, Task 2 and Task 3 are different from the Stateflow one. Because SimSched takes the execution time into account, Task 1 has the highest priority and an execution time of 1 ms, so Task 1 always runs during the simulation. Task 2 and Task 3 are always preempted by Task 1 because they have lower priorities than Task 1.
+ Fig. 41. The Stateflow scheduled three servos example output after applying the mDTPER mutation operator to decrease the period to 1 ms for DCServo1.
+ 0.9J. Chen et al. / Mutation Operators for Simulink Models
2153
+ Fig. 42. The SimSched scheduled three servos example out-
2154
+ put after applying mDTPER mutation operator to decrease
2155
+ period as 1ms for DCServo1.
2156
+ The second configuration is [13,5,6]. It means
2157
+ Task 1 has a period of 13ms and the period of Task
2158
+ 2 and Task 3 keep the same as 5ms and 6ms, re-
2159
+ spectively. Figure 43 shows the Stasflow scheduler
2160
+ mutant simulation result and the SimSched mutant
2161
+ has the same output as shown in the figure. Because
2162
+ Task 1 has a 13ms period, it has fewer computations
2163
+ than the original model. Although the output behav-
2164
+ ior looks like Task 1 misses its deadline in the figure,
2165
+ every execution of Task 1 meets its deadline and it
2166
+ is executed as scheduled.
2167
+ Fig. 43. The Stateflow scheduler three servos example out-
2168
+ put after applying mITPER mutation operator to increase
2169
+ period as 13ms for DCServo1.
2170
+ We generate mutants for mARPREC and
+ mRRPREC operators by setting each parallel state’s
2177
+ execution order in the Stateflow scheduler model
2178
+ and configuring the parameters and connections for
2179
+ the SimSched model. The two mutants’ simulation
2180
+ results are the same as the original model, except
2181
+ that each task’s execution order is different from the
2182
+ original model.
2183
+ We generate mutants for mITJ and mDTJ op-
2184
+ erators by adapting the Stateflow scheduler in the
2185
+ model and configuring the parameters in the Sim-
2186
+ Sched model. We set the configuration as [1,0,0].
2187
+ It means only Task 1 has a jitter as 1ms. Figure 44
2188
+ shows the SimSched scheduler three servos example
2189
+ task active chart after applying mITJ mutation oper-
2190
+ ator to increase jitter as 1ms for DCServo1. Be-
2191
+ cause Task 1 has 1ms jitter, Task 2 is executed first,
+ then Task 1 and Task 3.
2193
+ Fig. 44. The SimSched scheduler three servos example task
2194
+ active chart after applying mITJ mutation operator to in-
2195
+ crease jitter as 1ms for DCServo1.
2196
+ We only apply the execution time operator to the
2197
+ SimSched model due to the lack of support for the
2198
+ Stateflow scheduler. Figure 45 shows the effect out-
2199
+ put of mITET mutation operator. We set c1 = 3ms
2200
+ using the mITET mutation operator for Task 1 to
2201
+ generate a mutant. The output of Task 3, shown as
+ DCServo3 at the bottom of the figure, is a curly
+ wave: the control is unstable because preemption
+ by T1 causes T3 to miss its deadline. Figure 46
2205
+ shows the task preemption effect. Task 1 takes 3ms
2206
+ to execute and Task 2 takes 1ms to execute. After
2207
+ the execution of Task 1 and Task 2, Task 3 should be
2208
+ executed; however, it is the time that Task 1 is sched-
2209
+ uled to run. Task 1 has a higher priority, so Task 3 is
2210
+ preempted by Task 1. The first instance of Task 3 is
2211
+ executed at 19ms so Task 3 does not have a smooth
2212
+ control signal output as the other tasks. Although
2213
+ some task preemptions occur in Task 2, it does not
2214
+ miss enough deadlines to significantly affect the out-
2215
+ put. Task 2 still has a smooth output signal as shown
2216
+ in Figure 45 as the second chart.
2217
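Feeding the scheduling sketch given earlier the three-servo settings with the mITET-mutated execution time (periods 4, 5, and 6ms; Task 1's execution time raised to 3ms) reproduces the preemption pattern described here:

# Reusing simulate() from the earlier sketch with the mITET mutant configuration.
trace = simulate([("Task1", 4, 3, 0), ("Task2", 5, 1, 1), ("Task3", 6, 1, 2)], horizon_ms=25)
print(next(t for t, name in trace if name == "Task3"))   # 19: Task 3 first runs at 19ms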
+ Fig. 45. The SimSched scheduler three servos example sig-
2218
+ nal output after applying mITET mutation operator to in-
2219
+ crease execution time to 3ms for DCServo1.
2220
+ [Figure data omitted: DCServo1-3 output plots versus the SignalGenerator reference and task activation traces over 0-0.05s for the mutant simulations.]
2393
+ Fig. 46. The SimSched scheduler three servos example task
2394
+ active chart after applying mITET mutation operator to in-
2395
+ crease execution time to 3ms for DCServo1.
2396
+ 5.2.2. Throttle Position Control Model
2397
+ We adopt an AUTOSAR software component
2398
+ Simulink model from Mathworks shown in Figure
2399
+ 47. It implements a throttle position control sys-
2400
+ tem for an automobile and contains three sensors,
2401
+ one monitor, one controller, and one actuator. They
2402
+ are implemented as six subsystems and mapped to
2403
+ six runnables TPSSecondary, Monitor, Controller,
2404
+ Actuator, APPSnsr, and TPSPrimary, which are then
+ mapped to tasks T1 and T2. The task periods are
+ T1 = 5ms and T2 = 10ms, respectively. Each runnable
2407
+ has the same execution time of 1ms. Task settings
2408
+ are shown in Table 8. This example uses seven Data-
2409
+ StoreMemory blocks to access the shared resources.
2410
+ Fig. 47. Throttle position control Simulink model contains
2411
+ six runnables.
2412
+ Table 8. Throttle control example settings.
+ 
+ Task    Period (ms)    Execution Time (ms)    Priority    Runnable
+ T2      10             1                      1           TPSPrimary
+ T1      5              1                      2           TPSSecondary
+ T1      5              1                      2           Monitor
+ T1      5              1                      2           Controller
+ T1      5              1                      2           Actuator
+ T2      10             1                      1           APPSnsr
2450
+ Figure 48 shows the simulation result, which is
2451
+ generated by a Stateflow scheduler. The square wave
2452
+ in the figure is the simulated pedal input, and the
2453
+ curly wave is the output of the throttle body, repre-
2454
+ senting the current throttle position. The Stateflow
+ scheduler simulates the throttle controller and the
+ entire control process well.
2458
+ Fig. 48. The simulated throttle position of the throttle posi-
2459
+ tion control model scheduled by the Stateflow scheduler.
2460
+ Figure 49 shows the runnable active chart sched-
2461
+ uled by the Stateflow scheduler. All runnables are
2462
+ scheduled and executed at time 0. Runnables TPS-
+ Primary and APPSensor are mapped to T2; they are
+ scheduled and executed every 10ms, and the top
+ and bottom charts in the figure are their active
+ charts. The active charts of runnables TPSSecondary,
2467
+ Monitor, Controller, and Actuator are the four charts
2468
+ in the middle of the figure. They are scheduled and
2469
+ executed every 5ms.
2470
+ Fig. 49. The runnable active chart of the throttle position
2471
+ control model scheduled by the Stateflow scheduler.
2472
+ We apply SimSched to the Stateflow scheduler
2473
+ model, and we can get a similar simulation result as
2474
+ the Stateflow scheduler example based on the above
2475
+ task settings. Figure 50 shows the task active chart
2476
+ generated by SimSched. Every task has been exe-
2477
+ cuted within its own deadline. Both task T1 and T2
2478
+ are scheduled at time 0, but only T1 is executed at
+ time 0 due to its higher priority.
+ [Figure data omitted: Simulink block diagram labels for the throttle position control model (Fig. 47), the simulated pedal input and throttle position traces (Fig. 48), and the runnable activation charts of Fig. 49.]
+ T1 takes up 4ms to
2645
+ run. After the first instance of T1 is finished, T2 is
2646
+ executed. The second instance of T1 arrives at 5ms,
2647
+ which is during the middle of the execution of T2. T2
2648
+ is preempted by T1 and resumes at the completion of
2649
+ the second instance of T1.
2650
+ Fig. 50. The task level active chart of the throttle position
2651
+ control model scheduled by the SimSched scheduler.
2652
+ Figure 51 shows the runnable level active chart
2653
+ of the throttle position control model scheduled by
2654
+ the SimSched scheduler. From this figure, we can
2655
+ clearly see the activity of each runnable; it shows
+ the exact execution order of the runnables.
+ Runnables TPSSecondary, Monitor, Controller, and
2659
+ Actuator are executed one after another followed by
2660
+ TPSPrimary. Runnable APPSensor is executed af-
2661
+ ter the second instance of T1 and it is the preemption
2662
+ point of T2.
2663
+ Fig. 51. The runnable active chart of the throttle position
2664
+ control model scheduled by the SimSched scheduler.
2665
+ We apply the same approach used for the three-servo
+ example to the throttle position control model: we
+ adopt the Stateflow scheduler model and also replace
+ its scheduler with SimSched, generating mutants for
+ both schedulers for the experiments.
2671
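At the model-configuration level, each mutation operator can be viewed as a small transformation of the task-set description. The sketch below is a simplified stand-in for the actual SimSched block parameters (the dictionary layout and function names are ours, not the tool's); the values come from Table 8.

from copy import deepcopy

# Simplified task-set description for the throttle position control example (Table 8).
MODEL = {
    "T1": {"period": 5, "offset": 0, "jitter": 0, "priority": 2,
           "runnables": [("TPSSecondary", 1), ("Monitor", 1), ("Controller", 1), ("Actuator", 1)]},
    "T2": {"period": 10, "offset": 0, "jitter": 0, "priority": 1,
           "runnables": [("TPSPrimary", 1), ("APPSnsr", 1)]},
}

def mITO(model, task, delta_ms):
    """Increase Task Offset: yields one mutant per application."""
    mutant = deepcopy(model)
    mutant[task]["offset"] += delta_ms
    return mutant

def mDTPER(model, task, delta_ms):
    """Decrease Task Period."""
    mutant = deepcopy(model)
    mutant[task]["period"] -= delta_ms
    return mutant

offset_mutant = mITO(MODEL, "T1", 2)    # the Fig. 52 mutant: 2ms offset on T1
period_mutant = mDTPER(MODEL, "T1", 1)  # the Fig. 54 mutant: T1 period 5ms -> 4ms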
+ Figure 52 shows the runnable active chart of the
2672
+ Throttle Position Control model scheduled by Sim-
2673
+ Sched after applying the mITO mutation operator, in-
+ creasing the offset by 2ms for T1. The runnable Actuator
2675
+ active chart shown in the second bottom chart in the
2676
+ figure is missing its first execution. T1 takes 4ms to
2677
+ run, and it also has 2ms offset, so the total execution
2678
+ time of T1 exceeds its period 5ms. On the other hand,
2679
+ the mutant generated by the Stateflow scheduler can-
+ not simulate this overrun situation due to the lack of
2681
+ execution time simulation support.
2682
+ Fig. 52. The runnable active chart of Throttle Position Con-
2683
+ trol model scheduled by SimSched after applying mITO
+ mutation operator to increase the offset by 2ms for T1.
2685
+ Figure 53 shows the simulated throttle position
2686
+ of the throttle position control model scheduled by
2687
+ SimSched after applying mITPER mutation opera-
2688
+ tor for T1, setting its period to 100ms. The Stateflow
+ scheduler outputs the same figure. The mITPER mutation
+ operator reduces the number of computations a task performs
+ in the same amount of time, which results in unsta-
+ ble control as shown in the figure.
2693
+ Fig. 53. The simulated throttle position of the throttle po-
2694
+ sition control model scheduled by the Stateflow scheduler
2695
+ after applying mITPER mutation operator.
2696
+ Figure 54 shows the simulated throttle position
2697
+ of the throttle position control model scheduled by
2698
+ SimSched after applying mDTPER mutation opera-
2699
+ tor for T1, setting its period to 4ms. The mDTPER operator
+ can result in no output signal for T2: decreasing the period
+ increases the number of computations of the higher-rate task,
+ and the lower-rate task never gets a chance to execute. In this example, we
2703
+ set ρ1 = 4ms using the mDTPER mutation operator;
+ then T2 is always preempted by T1 and never has a
+ chance to execute.
+ [Figure data omitted: task-level and runnable-level activation charts for Figs. 50-52 and the simulated pedal input and throttle position traces for Fig. 53.]
2910
+ Fig. 54. The simulated throttle position of the throttle po-
2911
+ sition control model scheduled by SimSched after applying
2912
+ mDTPER mutation operator.
2913
+ We only apply the execution time operator to
2914
+ SimSched models. We set c1 = 5ms using mITET
2915
+ mutation operator for T1 to generate a mutant. This
2916
+ mutant just outputs the same throttle position as
2917
+ shown in Figure 54. Since T1 has increased its exe-
2918
+ cution time by 1ms, it just takes up all the time slots
2919
+ in its period. T2 is preempted by T1 during the simu-
2920
+ lation process.
2921
+ We apply mARPREC and mRRPREC mutation
2922
+ operators to this throttle position control exam-
2923
+ ple. First, there is no precedence between runnable
2924
+ APPSnsr and TPSPrimary, and we use a mARPREC
2925
+ mutation operator to add precedence γAPPSnsr to
2926
+ prec(rTPSPrimary) to generate mutants for both sched-
2927
+ ulers.
2928
+ Runnable Controller consumes the values
2929
+ produced by APPSnsr and TPSPrimary to calcu-
2930
+ late the throttle percent value for the throttle actu-
2931
+ ator. The changes in the simulation results of both
2932
+ mutants are trivial. Figure 55 shows the simulation
2933
+ result comparison between the original model and
2934
+ the SimSched mutant.
2935
+ Second, there is precedence between Controller
2936
+ and Actuator, and Controller is executed before
2937
+ Actuator. We remove the precedence from the pair
2938
+ of runnables, so the Actuator (destination) runs be-
2939
+ fore Controller (source), which changes the data de-
2940
+ pendency and delays the data. The difference in sim-
2941
+ ulation is similar to Figure 55.
2942
+ Fig. 55. The difference in simulation results between the
+ original model and the mutant with mARPREC mutation
+ operator scheduled by SimSched.
2945
+ We apply mDSM, mUDSM, mRDSM, mRSM,
2946
+ mRMSMR, and mRSMR mutation operators to this
2947
+ example and generate accordingly mutants for both
2948
+ the Stateflow scheduler and SimSched.
2949
+ Inter-
2950
+ estingly, since the task time properties have not
2951
+ changed for the shared memory mutation operators,
2952
+ the simulation results are consistent between the two
2953
+ types of mutants. For example, we apply the mDSM
2954
+ mutation operator to variable ThrCmdPercentValus
2955
+ and set the variable to a new constant value. The
2956
+ simulation result of both types of mutants is the
2957
+ same shown in Figure 56. Since this variable is an
2958
+ input to a Lookup table, there is always an output
2959
+ that matches the input value and yields a correspond-
2960
+ ing value to the output. The input value is constant,
2961
+ so the throttle position’s output is a smooth curve.
2962
+ Fig. 56. The throttle position output after applying mDSM
2963
+ mutation operator to variable ThrCmdPercentValus sched-
2964
+ uled.
2965
+ 5.3. Evaluation Result
2967
+ We apply the evaluation process to the example
2968
+ models to investigate the mutation operator’s effec-
2969
+ tiveness to kill mutants.
2970
+ [Figure data omitted: simulated throttle position traces for Figs. 54-56, including the comparison of the Stateflow-scheduled and SimSched-scheduled ThrottlePos outputs (TPCWithTaskAndDSM_CaseStudy) with a tolerance band.]
+ Tables 9 and 10 summarize the efficacy of the mutation
+ operators; each row gives the number of mutants an operator
+ generates and the number it kills. The mutation score is the
+ percentage of killed mutants out of the total number of
+ generated mutants.
3028
+ Table 9. Mutation analysis of mutation operators for three servos example.
+ 
+ Operator          Stateflow Scheduler        SimSched
+                   Mutants      Kills         Mutants      Kills
+ Offset            36           24            36           27
+ Period            39           25            39           25
+ Execution Time    N/A          N/A           36           31
+ Precedence        5            0             5            0
+ Priority          18           0             18           0
+ Jitter            36           24            36           27
+ Mutation Score    54.48%                     64.71%
3072
+ For the three servo example, we generate 134
3073
+ mutants for the Stateflow scheduler models and 170
3074
+ mutants for the SimSched models. We achieve a
3075
+ mutation score of 54.48% for the Stateflow sched-
3076
+ uler model and 64.71% for the SimSched models.
3077
+ Evaluation results show that the time-related mutation
+ operators (Offset, Period, Execution Time, and Jitter)
+ have the greatest effect on mutation testing. We observe that the
3081
+ Precedence and Priority mutation operators kill zero
+ mutants because, in this example, each controller in-
3083
+ dividually controls a motor. There is no connection
3084
+ between them, so the change of precedence and pri-
3085
+ ority does not cause any simulation changes. How-
3086
+ ever, the three controllers run on a single CPU, and the
+ length of one task's execution time affects the other tasks.
3088
+ We also observe that the Offset mutation operator only
+ affects each task's initial execution, causing tasks to
+ miss their first deadlines. Still, each mutant's simulation
+ results show that each controller maintains stable control
+ of its servo.
3093
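For reference, the mutation scores reported in Table 9 follow directly from the per-operator counts; the short computation below (ours, not the paper's tooling) simply restates the table's numbers.

def mutation_score(results):
    """results: {operator: (generated_mutants, killed_mutants)} -> (killed, total, score %)."""
    total = sum(m for m, _ in results.values())
    killed = sum(k for _, k in results.values())
    return killed, total, 100.0 * killed / total

stateflow = {"Offset": (36, 24), "Period": (39, 25), "Precedence": (5, 0),
             "Priority": (18, 0), "Jitter": (36, 24)}            # no Execution Time support
simsched  = {"Offset": (36, 27), "Period": (39, 25), "Execution Time": (36, 31),
             "Precedence": (5, 0), "Priority": (18, 0), "Jitter": (36, 27)}

for name, res in (("Stateflow", stateflow), ("SimSched", simsched)):
    killed, total, score = mutation_score(res)
    print(f"{name}: {killed}/{total} mutants killed, score = {score:.2f}%")
# Stateflow: 73/134 mutants killed, score = 54.48%
# SimSched: 110/170 mutants killed, score = 64.71%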
+ Table 10. Mutation analysis of mutation operators for throttle position control example.
+ 
+ Operator          Stateflow Scheduler        SimSched
+                   Mutants      Kills         Mutants      Kills
+ Offset            19           7             19           15
+ Period            30           6             30           19
+ Execution Time    N/A          N/A           34           34
+ Precedence        23           10            23           10
+ Priority          11           0             11           0
+ Jitter            19           12            19           15
+ Shared Memory     72           46            72           46
+ Mutation Score    49.39%                     70.2%
3143
+ For the throttle position control example, we
3144
+ generate 164 mutants for the Stateflow scheduler
3145
+ models and 198 mutants for the SimSched models.
3146
+ We achieve a mutation score of 49.39% for the State-
3147
+ flow scheduler model and 70.2% for the SimSched
3148
+ models. Evaluation results are similar to the previ-
3149
+ ous example. The time-related mutation operators
+ are the most effective for mutation testing. We
+ observe that the Shared Memory mutation operators
+ have the same number of kills for both Mµ and M′µ. In this
3155
+ example, task T2 has two runnables, TPSPrimary and
+ APPSnsr; each updates a shared variable at each
+ execution, and there is no direct relation between
+ them. Though SimSched can simulate the preemption
+ of T2, interrupting its execution, the shared memory
+ mutants' model behaviors are the same for both Mµ
+ and M′µ.
3163
+ From the above two examples, we can see the
3164
+ mutation operators are application-dependent, and
3165
+ SimSched can achieve a higher mutation score for
+ the time-related mutation operators. For example,
3167
+ the Precedence mutation operator kills zero mutants
3168
+ for the three servo example, but it kills ten mutants
3169
+ for the throttle position control example for both
3170
+ Stateflow Scheduler and SimSched. The three-servo
3171
+ example does not require any precedence at all;
3172
+ each task only controls itself. However, the throt-
3173
+ tle position control example requires precedence. A
3174
+ runnable consumes data from a previous runnable
3175
+ execution. If we alter the precedence, the execu-
3176
+ tion order is different from the original model execution;
+ it produces a different data flow. For exam-
3180
+ ple, in the Period mutation operator both Stateflow
3181
+ scheduler and SimSched kill the same mutants for
3182
+ the three-servo example, but SimSched kills more
3183
+ mutants than the Stateflow Scheduler in the throttle
3184
+ position control example. This is because the throttle
+ position control example has four runnables in T1, each
+ with a 1ms execution time. We apply mDTPER to T1 and
+ generate a mutant in which the period of T1 is 4ms instead
+ of 5ms; then T1 occupies all the execution time slots and
+ T2 is never executed under SimSched. However, the Stateflow
3191
+ Scheduler does not consider the execution time, so
3192
+ both T1 and T2 are executed as scheduled.
3193
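A quick utilisation check (our illustration, using the execution times from Table 8) shows why this particular mDTPER mutant behaves so differently under the two schedulers:

# CPU utilisation U = sum(C_i / T_i); U > 1 means the task set cannot be scheduled.
def utilisation(tasks):
    return sum(c / p for c, p in tasks)

# (execution_ms, period_ms): T1 bundles four 1ms runnables, T2 bundles two.
original = [(4, 5), (2, 10)]
mutant   = [(4, 4), (2, 10)]          # mDTPER: T1 period decreased from 5ms to 4ms

print(utilisation(original))          # 1.0 -> T1 leaves just enough idle time for T2
print(utilisation(mutant))            # 1.2 -> over 100%: the higher-priority T1 alone
                                      #        fills its period, so under SimSched T2 never runs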
+ 6. Related Work
3195
+ Our work aligns with the MBMT, and it has been
3196
+ applied to various models.
3197
+ Trakhtenbrot 41 pro-
3198
+ poses mutation testing on statechart-based models
3199
+ for reactive systems. This approach mainly deals
3200
+ with the conformance between specific semantics
3201
+ of statechart models and the model’s implementa-
3202
+ tion.
3203
+ Mutation testing has been applied to fea-
3204
+ ture models to test software product lines 20. The
3205
+ feature models represent the variability of software
3206
+ product lines and configurable systems.
3207
+ El-Fakih
3208
+ et al.14 develop a technique of mutation-based test
3209
+ case generation toward extended finite state ma-
3210
+ chines (EFSM) that examines whether the EFSM under test
+ agrees with user-defined faults. Belli et al.6 have sur-
+ veyed the MBMT approach in depth and detailed the
3213
+ approach applied to graph-based models, including
3214
+ directed graphs, event sequence graphs, finite-state
3215
+ machines, and statecharts. A recent mutation testing
3216
+ survey31 presents up-to-date advanced approaches
3217
+ that use mutants to support software engineering ac-
3218
+ tivities to model artifacts.
3219
+ Our work also fits in the timed system test-
3220
+ ing, which requires a real-time environment. Re-
3221
+ searchers 3,28,29 have utilized the most studied formalism,
+ TA, to inject faults into timed systems and have shown
+ that time-related errors cannot be found using randomly
+ generated test suites. Nilsson et al.28 first proposed a
+ set of extended TA muta-
3227
+ tion operators based on TA with Tasks (TAT) 30 to
3228
+ test real-time systems which depend on the execu-
3229
+ tion time and execution order of individual tasks.
3230
+ The mutation operators are interested in the time-
3231
+ liness of a task to meet its deadlines. Aichernig et
3232
+ al.3 propose a mutation testing framework for timed
3233
+ systems, where they define eight mutation operators
3234
+ to mutate the model and its mutants are expressed
3235
+ as a variant of TA in Uppaal specification format.
3236
+ The authors also develop a mutation-based test case
3237
+ generation framework for real-time, where they use
3238
+ symbolic bounded model checking techniques and
3239
+ incremental solving. Cornaglia et al.11 presents an
3240
+ automated framework MODELTime that facilitates
3241
+ the study of target platform-dependent timing dur-
3242
+ ing the model-based development of embedded ap-
3243
+ plications using MATLAB/Simulink simulations.
3244
+ ML/SL is one of the most popular formalisms to
3245
+ model and simulate embedded systems, and many
3246
+ researchers have explored the various types of mu-
3247
+ tation operators applied to ML/SL models. Hanh
3248
+ et al.18 propose a set of mutation operators based
3249
+ on investigating common faults in ML/SL models
3250
+ to validate test suites and present a process of muta-
3251
+ tion testing for ML/SL models. They provide twelve
3252
+ mutation operators and divide them into five cate-
3253
+ gories. Stephan et al.38 utilize the mutation testing
3254
+ technique to compare model-clone detection tools
3255
+ for ML/SL models.
3256
+ They present a taxonomy of
3257
+ ML/SL and propose a set of structural mutation op-
3258
+ erators based on three clone types. The mutation
3259
+ operators are used to evaluate the model-clone de-
3260
+ tectors. Using the mutation-based technique to gen-
3261
+ erate test cases for ML/SL models automatically has
3262
+ been studied 8,19.
3263
+ They can effectively generate
3264
+ small sets of test cases that achieve high coverage
3265
+ on a collection of Simulink models from the auto-
3266
+ motive domain. A recent work SLEMI 10 has ap-
3267
+ plied mutation techniques to the Simulink compiler
3268
+ and uses tools to generate mutants of the seed model
3269
+ and found 9 confirmed bugs in Simulink models.
3270
+ Our work intends to exploit mutation analysis
3271
+ to identify potential time-related errors in ML/SL
3272
+ models.
3273
+ Roy and Cordy 33,34 first propose using
3274
+ mutation analysis to assist the evaluation of soft-
3275
+ ware clone detection tools. They develop a frame-
3276
+ work for testing code-clone detectors based on mu-
3277
3279
+ tation. Stephan et al.37,36 proposed a framework that
3280
+ can objectively and quantitatively evaluate and com-
3281
+ pare model-clone detectors using mutation analysis.
3282
+ Their work is based on a structural mutation method
3283
+ for ML/SL model mutation. Our mutation operators
3284
+ are based on a timed system task model, whereas,
3285
+ there are no relevant existing studies that directly
3286
+ integrated the ML/SL models in the timed systems
3287
+ in the MIL phase; thus, we carry out the work pre-
3288
+ sented in this paper.
3289
+ Co-simulation16 is a widely used technique in
3290
+ model-based testing to verify as much of the system
3291
+ functionality, among subsystems, as possible. Com-
3292
+ posing the simulations of sub-simulators can achieve
3293
+ a joint simulation of a coupled system. Many differ-
3294
+ ent languages and tools are used for other purposes
3295
+ in the model-based design domain, either designing
3296
+ continuous plants or discrete controllers.
3297
+ A rela-
3298
+ tively recent open standard functional mock-up in-
3299
+ terface (FMI) is developed for exchange simulation
3300
+ models in a standardized format, including support
3301
+ for co-simulation. Gomes et al.15 propose an ap-
3302
+ proach to facilitate the implementation of the Func-
3303
+ tional Mock-up Interface standard.
3304
+ They use the
3305
+ MBT methodology to evaluate the tools that export
3306
+ Functional Mock-up Units (FMUs). Hence, they can
3307
+ root out the ambiguities and improve conformance
3308
+ to the FMI standard. Garro et al.7 employs FMI to
3309
+ perform co-simulation to verify the system require-
3310
+ ments based on the FOrmal Requirements Model-
3311
+ ing Language and the Modelica language.
3312
+ Zafar
3313
+ et al.42 present a systematic tool-supported MBT
3314
+ workflow to facilitate the simulation-based testing
3315
+ process of an embedded system. The workflow ex-
3316
+ tends from the requirements phase, and generation
3317
+ of executable test scripts, to the execution of gener-
3318
+ ated test scripts on simulation levels.
3319
+ 7. Conclusion and future work
3321
+ In this paper, we proposed a set of timed muta-
3322
+ tion operators for the ML/SL model that is primar-
3323
+ ily intended to integrate the timed task model in the
3324
+ ML/SL model to better support MIL simulation us-
3325
+ ing mutation analysis. Moreover, testing at an ear-
3326
+ lier stage during the development process reduces
3327
+ development costs, since making changes and fixing
+ errors earlier is much more manageable. We introduce a
3329
+ timed task model and present a set of mutation op-
3330
+ erators for the ML/SL based on this task model. We
3331
+ implement a mutation analysis framework that can
3332
+ apply mutation operators to the simple ML/SL mod-
3333
+ els. We demonstrate the approach on several ML/SL
3334
+ models. The results validate that mutation analysis
3335
+ can reveal time-related faults. We intend to automate
3336
+ the mutation testing process for the ML/SL environ-
3337
+ ment and improve the mutation operators to expose
3338
+ defects in the future. We will further validate our
3339
+ mutation analysis method on more complex industrial
+ ML/SL model sets.
3341
+ Acknowledgments
3342
+ This work was supported in part by the Natural Sci-
3343
+ ences and Engineering Research Council of Canada
3344
+ (NSERC), as part of the NECSIS Automotive Part-
3345
+ nership with General Motors, IBM Canada, and Ma-
3346
+ lina Software Corp.
3347
+ 1. Bernhard K. Aichernig, Harald Brandl, Elisabeth
3348
+ J¨obstl, Willibald Krenn, Rupert Schlick, and Stefan
3349
+ Tiran.
3350
+ Killing strategies for model-based mutation
3351
+ testing. Software Testing, Verification and Reliability,
3352
+ 25(8):716–748, dec 2015.
3353
+ 2. Bernhard K. Aichernig and Florian Lorber. Towards
3354
+ generation of adaptive test cases from partial models
3355
+ of determinized timed automata. In 2015 IEEE Eighth
3356
+ International Conference on Software Testing, Verifi-
3357
+ cation and Validation Workshops (ICSTW), pages 1–6.
3358
+ IEEE, apr 2015.
3359
+ 3. Bernhard K. Aichernig, Florian Lorber, and Dejan
3360
+ Niˇckovi´c. Time for mutants - Model-based mutation
3361
+ testing with timed automata. In Lecture Notes in Com-
3362
+ puter Science (including subseries Lecture Notes in
3363
+ Artificial Intelligence and Lecture Notes in Bioinfor-
3364
+ matics), volume 7942 LNCS, pages 20–38. Springer,
3365
+ Berlin, Heidelberg, 2013.
3366
+ 4. Rajeev Alur and David L. Dill. A theory of timed au-
3367
+ tomata. Theoretical Computer Science, 126(2):183–
3368
+ 235, apr 1994.
3369
+ 5. AUTOSAR. Autosar development partnership. http:
3370
+ //www.autosar.org, 2018.
3371
+ 6. Fevzi Belli, Christof J. Budnik, Axel Hollmann,
3372
+ Tugkan Tuglular, and W. Eric Wong.
3373
+ Model-based
3374
+ mutation testing—Approach and case studies. Science
3375
+ of Computer Programming, 120:25–48, may 2016.
3376
3378
+ 7. Daniel Bouskela, Alberto Falcone, Alfredo Garro, Au-
3379
+ drey Jardin, Martin Otter, Nguyen Thuy, and Andrea
3380
+ Tundis.
3381
+ Formal requirements modeling for cyber-
3382
+ physical systems engineering: an integrated solution
3383
+ based on form-l and modelica. Requirements Engi-
3384
+ neering, pages 1–30, 2021.
3385
+ 8. Angelo Brillout, Nannan He, Michele Mazzucchi,
3386
+ Daniel Kroening, Mitra Purandare, Philipp R¨ummer,
3387
+ and Georg Weissenbacher. Mutation-based test case
3388
+ generation for Simulink models.
3389
+ In Lecture Notes
3390
+ in Computer Science (including subseries Lecture
3391
+ Notes in Artificial Intelligence and Lecture Notes in
3392
+ Bioinformatics), volume 6286 LNCS, pages 208–227,
3393
+ 2010.
3394
+ 9. Jian Chen, Manar H Alalfi, Thomas R Dean, and
3395
+ S Ramesh.
3396
+ Modeling autosar implementations
3397
+ in simulink.
3398
+ In European Conference on Mod-
3399
+ elling Foundations and Applications, pages 279–292.
3400
+ Springer, 2018.
3401
+ 10. Shafiul Azam Chowdhury, Sohil Lal Shrestha, Tay-
3402
+ lor T. Johnson, and Christoph Csallner. Slemi: Equiva-
3403
+ lence modulo input (emi) based mutation of cps mod-
3404
+ els for finding compiler bugs in simulink.
3405
+ In 2020
3406
+ IEEE/ACM 42nd International Conference on Soft-
3407
+ ware Engineering (ICSE), pages 335–346, 2020.
3408
+ 11. Alessandro Cornaglia, Shakib Hasan, Alexander
3409
+ Viehl, Oliver Bringmann, and Wolfgang Rosenstiel.
3410
+ Modeltime: Fully automated timing exploration of
3411
+ simulink models for embedded processors. In AmE
3412
+ 2019 - Automotive meets Electronics; 10th GMM-
3413
+ Symposium, pages 1–6, 2019.
3414
+ 12. Fabio Cremona, Matteo Morelli, and Marco Di Na-
3415
+ tale.
3416
+ TRES: A Modular Representation of Sched-
3417
+ ulers, Tasks, and Messages to Control Simulations
3418
+ in Simulink. Proceedings of the 30th Annual ACM
3419
+ Symposium on Applied Computing, pages 1940–1947,
3420
+ 2015.
3421
+ 13. R.A. DeMillo, R.J. Lipton, and F.G. Sayward. Hints
3422
+ on Test Data Selection: Help for the Practicing Pro-
3423
+ grammer. Computer, 11(4):34–41, apr 1978.
3424
+ 14. Khaled
3425
+ El-Fakih,
3426
+ Anton
3427
+ Kolomeez,
3428
+ Svetlana
3429
+ Prokopenko, and Nina Yevtushenko.
3430
+ Extended
3431
+ Finite State Machine Based Test Derivation Driven
3432
+ by User Defined Faults.
3433
+ In 2008 International
3434
+ Conference on Software Testing, Verification, and
3435
+ Validation, pages 308–317. IEEE, apr 2008.
3436
+ 15. Cl´audio Gomes, Romain Franceschini, Nick Battle,
3437
+ Casper Thule, Kenneth Lausdahl, Hans Vangheluwe,
3438
+ and Peter Gorm Larsen. Application of model-based
3439
+ testing to dynamic evaluation of functional mockup
3440
+ units. 2020.
3441
+ 16. Cl´audio Gomes, Casper Thule, David Broman, Pe-
3442
+ ter Gorm Larsen, and Hans Vangheluwe.
3443
+ Co-
3444
+ simulation: a survey.
3445
+ ACM Computing Surveys
3446
+ (CSUR), 51(3):1–33, 2018.
3447
+ 17. R.G. Hamlet.
3448
+ Testing Programs with the Aid of a
3449
+ Compiler. IEEE Transactions on Software Engineer-
3450
+ ing, SE-3(4):279–290, jul 1977.
3451
+ 18. Le Thi My Hanh and Nguyen Thanh Binh.
3452
+ Muta-
3453
+ tion operators for simulink models. In Proceedings -
3454
+ 4th International Conference on Knowledge and Sys-
3455
+ tems Engineering, KSE 2012, pages 54–59. IEEE, aug
3456
+ 2012.
3457
+ 19. N. He, P. R¨ummer, and D. Kroening. Test-case gener-
3458
+ ation for embedded simulink via formal concept anal-
3459
+ ysis. In 2011 48th ACM/EDAC/IEEE Design Automa-
3460
+ tion Conference (DAC), pages 224–229, 2011.
3461
+ 20. Christopher Henard, Mike Papadakis, Gilles Perrouin,
3462
+ Jacques Klein, and Yves Le Traon. Assessing Soft-
3463
+ ware Product Line Testing Via Model-Based Mu-
3464
+ tation: An Application to Similarity Testing.
3465
+ In
3466
+ 2013 IEEE Sixth International Conference on Soft-
3467
+ ware Testing, Verification and Validation Workshops,
3468
+ pages 188–197. IEEE, mar 2013.
3469
+ 21. Dan Henriksson, Anton Cervin, and Karl-Erik ˚Arz´en.
3470
+ TrueTime : Real-time Control System Simulation with
3471
+ MATLAB / Simulink.
3472
+ Proceedings of the Nordic
3473
+ MATLAB Conference, 2003.
3474
+ 22. Yue Jia and Mark Harman. An Analysis and Survey
3475
+ of the Development of Mutation Testing. IEEE Trans-
3476
+ actions on Software Engineering, 37(5):649–678, sep
3477
+ 2011.
3478
+ 23. Edward A. Lee, Stephen Neuendorffer, and Gang
3479
+ Zhou.
3480
+ Synchronous Reactive Models.
3481
+ In Claudius
3482
+ Ptolemaeus, editor, System Design, Modeling, and
3483
+ Simulation using Ptolemy II. Ptolemy.org, 2014.
3484
+ 24. J. Lehoczky, L. Sha, and Y. Ding. The rate monotonic
3485
+ scheduling algorithm: exact characterization and av-
3486
+ erage case behavior. [1989] Proceedings. Real-Time
3487
+ Systems Symposium, pages 0–5, 1989.
3488
+ 25. MathWorks. Simulink User’s Guide, r2017b. http:
3489
+ //www.mathworks.com, 2017.
3490
+ 26. Reza Matinnejad, Shiva Nejati, Lionel C. Briand, and
3491
+ Thomas Bruckmann. Automated test suite generation
3492
+ for time-continuous simulink models. In Proceedings
3493
+ of the 38th International Conference on Software En-
3494
+ gineering - ICSE ’16, pages 595–606, New York, New
3495
+ York, USA, 2016. ACM Press.
3496
+ 27. Andreas Naderlinger. Simulating preemptive schedul-
3497
+ ing with timing-aware blocks in Simulink. In Design,
3498
+ Automation & Test in Europe Conference & Exhibi-
3499
+ tion (DATE), 2017, pages 758–763. IEEE, mar 2017.
3500
+ 28. R. Nilsson, J. Offutt, and S.F. Andler.
3501
+ Mutation-
3502
+ based testing criteria for timeliness.
3503
+ In Proceed-
3504
+ ings of the 28th Annual International Computer Soft-
3505
+ ware and Applications Conference, 2004. COMPSAC
3506
+ 2004., pages 306–311. IEEE, 2004.
3507
+ 29. Robert Nilsson and Jeff Offutt. Automated testing of
3508
+ timeliness: A case study. In Proceedings - Interna-
3509
+ tional Conference on Software Engineering, 2007.
3510
3512
+ 30. C. Norstrom, A. Wall, and Wang Yi. Timed automata
3513
+ as task models for event-driven systems.
3514
+ In Pro-
3515
+ ceedings Sixth International Conference on Real-Time
3516
+ Computing Systems and Applications. RTCSA’99
3517
+ (Cat. No.PR00306), pages 182–189, 1999.
3518
+ 31. Mike Papadakis, Marinos Kintis, Jie Zhang, Yue Jia,
3519
+ Yves Le Traon, and Mark Harman.
3520
+ Mutation test-
3521
+ ing advances: an analysis and survey.
3522
+ In Advances
3523
+ in Computers, volume 112, pages 275–378. Elsevier,
3524
+ 2019.
3525
+ 32. Mike Papadakis, Marinos Kintis, Jie Zhang, Yue Jia,
3526
+ Yves Le Traon, and Mark Harman. Mutation Test-
3527
+ ing Advances: An Analysis and Survey. Advances in
3528
+ Computers, 112:275–378, jan 2019.
3529
+ 33. Chanchal K. Roy and James R. Cordy.
3530
+ A muta-
3531
+ tion / injection-based automatic framework for eval-
3532
+ uating code clone detection tools. In IEEE Interna-
3533
+ tional Conference on Software Testing, Verification,
3534
+ and Validation Workshops, ICSTW 2009, pages 157–
3535
+ 166, 2009.
3536
+ 34. Chanchal K. Roy, James R. Cordy, and Rainer
3537
+ Koschke.
3538
+ Comparison and evaluation of code
3539
+ clone detection techniques and tools: A qualita-
3540
+ tive approach.
3541
+ Science of Computer Programming,
3542
+ 74(7):470–495, may 2009.
3543
+ 35. Julien Schmaltz and Jan Tretmans.
3544
+ On confor-
3545
+ mance testing for timed systems.
3546
+ In Lecture Notes
3547
+ in Computer Science (including subseries Lecture
3548
+ Notes in Artificial Intelligence and Lecture Notes in
3549
+ Bioinformatics), volume 5215 LNCS, pages 250–264.
3550
+ Springer, Berlin, Heidelberg, 2008.
3551
+ 36. Matthew Stephan.
3552
+ Model clone detector evaluation
3553
+ using mutation analysis.
3554
+ In Proceedings - 30th In-
3555
+ ternational Conference on Software Maintenance and
3556
+ Evolution, ICSME 2014, pages 633–638. Institute of
3557
+ Electrical and Electronics Engineers Inc., dec 2014.
3558
+ 37. Matthew Stephan, Manar H. Alafi, Andrew Steven-
3559
+ son, and James R. Cordy.
3560
+ Using mutation analysis
3561
+ for a model-clone detector comparison framework. In
3562
+ Proceedings - International Conference on Software
3563
+ Engineering, pages 1261–1264, 2013.
3564
+ 38. Matthew Stephan, Manar H Alalfi, and James R
3565
+ Cordy. Towards a taxonomy for Simulink model mu-
3566
+ tations. In Proceedings - IEEE 7th International Con-
3567
+ ference on Software Testing, Verification and Valida-
3568
+ tion Workshops, ICSTW 2014, pages 206–215, 2014.
3569
+ 39. The AUTOSAR Consortium. Applying simulink to
3570
+ autosar, r3.1., 2006.
3571
+ 40. The AUTOSAR Consortium. The AUTOSAR Stan-
3572
+ dard, r4.3., 2018.
3573
+ 41. Mark Trakhtenbrot. Implementation-Oriented Muta-
3574
+ tion Testing of Statechart Models. In 2010 Third Inter-
3575
+ national Conference on Software Testing, Verification,
3576
+ and Validation Workshops, pages 120–125. IEEE, apr
3577
+ 2010.
3578
+ 42. Muhammad Nouman Zafar, Wasif Afzal, and Eduard
3579
+ Enoiu. Towards a workflow for model-based testing of
3580
+ embedded systems. In Proceedings of the 12th Inter-
3581
+ national Workshop on Automating TEST Case Design,
3582
+ Selection, and Evaluation, pages 33–40, 2021.
3583
+ 43. Yuan Zhan and John A. Clark. Search-based muta-
3584
+ tion testing for simulink models.
3585
+ In GECCO 2005
3586
+ - Genetic and Evolutionary Computation Conference,
3587
+ pages 1061–1068, New York, New York, USA, 2005.
3588
+ ACM Press.
3589
+
OdAyT4oBgHgl3EQf7PpY/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
OtAzT4oBgHgl3EQfIftM/content/tmp_files/2301.01062v1.pdf.txt ADDED
@@ -0,0 +1,2037 @@
1
+ arXiv:2301.01062v1 [math.AT] 3 Jan 2023
2
+ ON THE COHOMOLOGY OF TORELLI GROUPS. II
3
+ OSCAR RANDAL-WILLIAMS
4
+ Abstract. We describe the ring structure of the rational cohomology of the
5
+ Torelli groups of the manifolds #gSn × Sn in a stable range, for 2n ≥ 6.
6
+ Some of our results are also valid for 2n = 2, where they are closely related to
7
+ unpublished results of Kawazumi and Morita.
8
+ Contents
+ 1.  Introduction                                        1
+ 2.  Characteristic classes                              3
+ 3.  Twisted cohomology of diffeomorphism groups         9
+ 4.  Cohomology of Torelli groups                       19
+ 5.  The case 2n = 2                                    22
+     References                                         28
26
+ 1. Introduction
27
+ This paper can be considered as a (somewhat extensive) addendum to our earlier
28
+ work with Kupers [KRW20b]. We shall be concerned with the manifold Wg :=
29
+ #gSn × Sn generalising to higher dimensions the orientable surface of genus g, its
30
+ topological group Diff+(Wg) of orientation-preserving diffeomorphisms, and various
31
+ subgroups of it. The first kind of subgroups are Diff(Wg, D2n) ≤ Diff+(Wg, ∗) ≤
32
+ Diff+(Wg), the diffeomorphisms which fix a disc and a point respectively.
33
+ The
34
+ second kind are their Torelli subgroups
35
+ Tor(Wg, D2n),
36
+ Tor+(Wg, ∗),
37
+ Tor+(Wg),
38
+ consisting of those diffeomorphisms which in addition act trivially on Hn(Wg; Z).
39
+ The intersection form on this middle cohomology group is nondegenerate and (−1)n-
40
+ symmetric, giving a homomorphism
41
+ αg : Diff+(Wg) −→ Gg := Sp2g(Z) if n is odd, Og,g(Z) if n is even.
47
+ This map is not always surjective, but its image is a certain finite-index subgroup
48
+ G′
49
+ g ≤ Gg, even when restricted to Diff(Wg, D2n), so there is an outer G′
50
+ g-action on
51
+ each of the Torelli subgroups. This makes the rational cohomology of each of the
52
+ Torelli groups into G′
53
+ g-representations.
54
+ In [KRW20b], for 2n ≥ 6 we determined H∗(BTor(Wg, D2n); Q) as a ring and
55
+ as a G′
56
+ g-representation in a range of degrees tending to infinity with g.
57
+ Using
58
+ the Serre spectral sequence associated to various simple fibrations relating the dif-
59
+ ferent Torelli groups we were able to also determine H∗(BTor+(Wg, ∗); Q) and
60
+ H∗(BTor+(Wg); Q) as G′
61
+ g-representations. This kind of argument was not able to
62
+ 2010 Mathematics Subject Classification. 55R40, 11F75, 57S05, 18D10, 20G05.
63
+ Key words and phrases. Cohomology of diffeomorphism groups, Torelli groups, cohomology of
64
+ arithmetic groups, Miller-Morita-Mumford classes.
65
69
+ determine the ring structure, however, as multiplicative information gets lost when
70
+ passing to the associated graded of the Serre filtration. Here we shall determine
71
+ H∗(BTor+(Wg, ∗); Q) and H∗(BTor+(Wg); Q) as Q-algebras too: this is achieved in
72
+ Theorem 4.1. The statement given there is more powerful, but just as in [KRW20b,
73
+ Section 5] one can extract from it the following presentation for H∗(BTor+(Wg); Q),
74
+ which is easier to parse. (A presentation for H∗(BTor+(Wg, ∗); Q) can be extracted
75
+ in a similar way.)
76
+ Let us write H(g) := Hn(Wg; Q), on which G′g operates in the evident way. Let
+ λ : H(g) ⊗ H(g) → Q denote the intersection form, and {a_i}_{i=1}^{2g} be a basis of H(g)
+ with dual basis {a#_i}_{i=1}^{2g} in the sense that λ(a#_i, a_j) = δ_{ij}, so that the form dual
+ to the pairing λ is ω = Σ_{i=1}^{2g} a_i ⊗ a#_i. In Section 2.2 we will construct certain
87
+ “modified twisted Miller–Morita–Mumford classes”, which when restricted to the
88
+ Torelli group yield G′
89
+ g-equivariant maps
90
+ ¯κc : H(g)⊗r −→ Hn(r−2)+|c|(BTor+(Wg); Q)
91
+ for each c ∈ Q[e, p1, p2, . . . , pn−1] = H∗(BSO(2n); Q) and each s ≥ 0. When r = 0
92
+ we write ¯κc = ¯κc(1); these agree with the usual Miller–Morita–Mumford classes κc.
93
+ Theorem A. If 2n ≥ 6 then, in a range of degrees tending to infinity with g,
94
+ H∗(BTor+(Wg); Q) is generated as a Q-algebra by the classes ¯κc(v1 ⊗ · · · ⊗ vr) for
95
+ c a monomial in e, p1, . . . , pn−1, and r ≥ 0, such that n(r −2)+|c| > 0. A complete
96
+ set of relations in this range is given by
97
+ (i) ¯κc(vσ(1) ⊗ · · · ⊗ vσ(r)) = sign(σ)n · ¯κc(v1 ⊗ · · · ⊗ vr),
98
+ (ii) ¯κe(v1) = 0,
99
+ (iii) Σ_i ¯κ_x(v ⊗ a_i) · ¯κ_y(a#_i ⊗ w) = ¯κ_{x·y}(v ⊗ w) + (1/χ²) ¯κ_{e²} · ¯κ_x(v) · ¯κ_y(w)
+       − (1/χ) ( ¯κ_{e·x}(v) · ¯κ_y(w) + ¯κ_x(v) · ¯κ_{e·y}(w) ),
+ (iv) Σ_i ¯κ_x(v ⊗ a_i ⊗ a#_i) = ((χ − 2)/χ) ¯κ_{e·x}(v) + (1/χ²) ¯κ_{e²} · ¯κ_x(v),
120
+ (v) ¯κLi = 0,
121
+ for v ∈ H(g)⊗r and w ∈ H(g)⊗s.
122
+ For 2n = 4 or 2n = 2 there is still a map from the Q-algebra given by this
123
+ presentation to H∗(BTor+(Wg); Q). If 2n = 2 then (in a stable range) this map
124
+ is an isomorphism onto the maximal algebraic subrepresentation in degrees ≤ N,
125
+ assuming that H∗(BTor+(Wg); Q) is finite-dimensional in degrees < N for all large
126
+ enough g. This is known to hold for N = 2 by work of Johnson [Joh85].
127
+ 1.1. Outline. The overall strategy is parallel to [KRW20b]. There we defined cer-
128
+ tain twisted Miller–Morita–Mumford classes and used them to describe the twisted
129
+ cohomology groups H∗(BDiff+(Wg, D2n); H⊗s) in a stable range of degrees, where
130
+ H is the local coefficient system corresponding to Hn(Wg; Q) with the action by dif-
131
+ feomorphisms of Wg. This calculation was valid for 2n = 2 as well. Using that for
132
+ 2n ≥ 6 the G′
133
+ g-representations H∗(BTor+(Wg, D2n); Q) extend to representations
134
+ of the ambient algebraic group (namely Sp2g or Og,g) by [KRW20a]1, the argument
135
+ was completed by establishing the degeneration of the Serre spectral sequence
136
+ E_2^{p,q} = Hp(G′g; Hq(BTor+(Wg, D2n); Q) ⊗ H⊗s) =⇒ Hp+q(BDiff+(Wg, D2n); H⊗s)
140
+ using work of Borel, and then using a categorical form of Schur–Weyl duality to ex-
141
+ tract the structure of H∗(BTor+(Wg, D2n); Q) from the H∗(BDiff+(Wg, D2n); H⊗s)
142
+ for all s’s and various structure maps between them.
143
+ 1In fact we did something more complicated in [KRW20b] because this algebraicity result was
144
+ not known at the time, but please allow some narrative leeway.
145
148
+ The twisted Miller–Morita–Mumford classes may be defined on BDiff+(Wg, ∗)
149
+ too, but not on BDiff+(Wg).
150
+ Our first task will be to define so-called “modi-
151
+ fied twisted Miller–Morita–Mumford classes” in H∗(BDiff+(Wg); H⊗s) and analyse
152
+ their behaviour: it turns out that their behaviour is significantly more complicated
153
+ than the unmodified version, though still understandable. We will then use them
154
+ to describe the twisted cohomology groups H∗(BDiff+(Wg); H⊗s) in a stable range
155
+ of degrees. This description will be in terms of a certain vector space of graphs
156
+ with vertices labelled by monomials in Euler and Pontrjagin classes, which play the
157
+ role here of the vector spaces of labelled partitions from [KRW20b]. The passage
158
+ from this calculation to H∗(BTor+(Wg); Q) is as above.
159
+ The case of dimension 2n = 2 is somewhat special, in precisely the same way as
160
+ it was in [KRW20b]: the calculation of H∗(BDiff+(Wg); H⊗s) is valid in this case,
161
+ but as the cohomology of BTor+(Wg) is not even known to be finite-dimensional
162
+ in a stable range, we cannot make a conclusion about it. (Instead one can make
163
+ a conclusion about the continuous cohomology of the Torelli group, i.e. the Lie
164
+ algebra cohomology of its Mal’cev Lie algebra: see [KRW21], [FNW21], [Hai20].)
165
+ In addition, in this case our modified twisted Miller–Morita–Mumford classes are
166
+ essentially the same as those that have been defined by Kawazumi and Morita
167
+ [Mor96, KM96, KM01], and the graphical calculus that we employ is similar to
168
+ theirs. In Section 5 we fully explain this connection, and also relate it to work of
169
+ Garoufalidis and Nakamura [GN98, GN07] and Akazawa [Aka05].
170
+ To avoid a great deal of repetition we have refrained from spelling out a lot of the
171
+ background that was given in [KRW20b], and from giving in detail arguments that
172
+ are very similar to those given there. As such this paper should not be considered
173
+ as attempting to be self-contained: given that its interest will be to readers of
174
+ [KRW20b] this should not present a problem.
175
+ 1.2. Acknowledgements. I am grateful to Alexander Kupers for feedback on an
176
+ earlier draft. I was supported by the ERC under the European Union’s Horizon
177
+ 2020 research and innovation programme (grant agreement No. 756444) and by a
178
+ Philip Leverhulme Prize from the Leverhulme Trust.
179
+ 2. Characteristic classes
180
+ 2.1. Recollection on twisted Miller–Morita–Mumford classes. If π′ : E′ →
181
+ X′ is an oriented smooth W 2n
182
+ g -bundle equipped with a section s : X′ → E′, and
183
+ H denotes the local coefficient system x �→ Hn((π′)−1(x); Q) on X′, then it is
184
+ explained in [KRW20b, Section 3.2] that there is a unique class ε = εs ∈ Hn(E′; H)
185
+ characterised by
186
+ (i) for each x ∈ X′ the element ε|(π′)−1(x) ∈ Hn((π′)−1(x); Q)⊗Hn((π′)−1(x); Q)
187
+ is coevaluation, and
188
+ (ii) s∗ε = 0.
189
+ The proof is as follows. The Serre spectral sequence yields an exact sequence
190
+ 0 → Hn(X′; H)
191
+ (π′)∗
192
+ → Hn(E′; H) → H0(X′; H∨⊗H)
193
+ dn+1
194
+ → Hn+1(X′; H)
195
+ (π′)∗
196
+ → Hn+1(E′; H)
197
+ and the section s shows that the right-hand map (π′)∗ is injective, so that the map
198
+ dn+1 is zero, and splits the left-hand map (π′)∗. The class coev ∈ H0(X′; H∨ ⊗ H)
199
+ then gives rise to a unique ε satisfying the given properties.
200
+ We then defined the twisted Miller–Morita–Mumford class
201
+ (2.1)
202
+ κεac = κεac(π′, s) :=
203
+
204
+ π′ εa · c(Tπ′E′) ∈ H(a−2)n+|c|(X′; H⊗a).
205
+
206
208
2.2. Modified twisted Miller–Morita–Mumford classes. If π : E → X is
an oriented smooth W_g^{2n}-bundle but is not equipped with a section then, as long
as χ := χ(Wg) = 2 + (−1)^n 2g ≠ 0 (i.e. (n, g) ≠ (odd, 1), cf. Remark 2.1), the
cohomological role of the section can instead be played by the transfer map
    (1/χ) π!(e · −) : H∗(E; H) −→ H∗(X; H),
where e := e(TπE) ∈ H2n(E; Q) denotes the Euler class of the vertical tangent
bundle. The projection formula
    (1/χ) π!(e · π∗(x)) = (1/χ) π!(e) · x = (χ/χ) x = x
shows that this map splits π∗. Thus in this situation there is a unique class ¯ε ∈
Hn(E; H) characterised by
(i) for each x ∈ X the element ¯ε|π−1(x) ∈ Hn(π−1(x); Q) ⊗ Hn(π−1(x); Q) is
    coevaluation, and
(ii) (1/χ) π!(e · ¯ε) = 0.
228
+ Remark 2.1. If (n, g) = (odd, 1) then there is no class ¯ε ∈ Hn(E; H) satisfying (i)
229
+ and natural under pullback. To see this it suffices to give one example of a smooth
230
+ oriented W1-bundle for which ¯ε does not exist. Consider the Borel construction for
231
+ the evident action of S1 × S1 on W1 = Sn × Sn given by considering Sn as the unit
232
sphere in C^{(n+1)/2}. This gives a smooth oriented W1-bundle over B(S1 × S1) with
233
+ total space E ≃ CP(n−1)/2 × CP(n−1)/2. Thus Hn(E; H) = 0 as n is odd but E has
234
+ a cell structure with only even-dimensional cells.
235
By analogy with (2.1) we may then define the modified twisted Miller–Morita–
Mumford class
(2.2)    κ¯εac = κ¯εac(π) := ∫_π ¯ε^a · c(TπE) ∈ H^{(a−2)n+|c|}(X; H⊗a).
242
+ If π : E → X does have a section s : X → E then the class ε ∈ Hn(E; H) is also
243
+ defined, and we may compare it with ¯ε as follows:
244
Lemma 2.2. If π : E → X has a section then ¯ε = ε − (1/χ) π∗κεe.

Proof. The classes ε, ¯ε ∈ Hn(E; H) are both defined, and agree when restricted to
the fibres of the map π, so by considering the Serre spectral sequence for π we must
have ¯ε − ε = π∗(x) for some class x ∈ Hn(X; H). Applying (1/χ) π!(e · −) we see that
x = (1/χ) π!(e · (¯ε − ε)) = 0 − (1/χ) π!(e · ε) = −(1/χ) κεe. (Here we have used, as we often will,
the fact that e has even degree to commute it past ε.)
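As a quick sanity check (not needed for the argument), this formula is consistent with property (ii) of ¯ε: using the projection formula and π!(e) = χ one gets (1/χ) π!(e · ¯ε) = (1/χ) π!(e · ε) − (1/χ²) κεe · π!(e) = (1/χ) κεe − (1/χ) κεe = 0.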
255
+
256
Remark 2.3 (Splitting principle). The pullback square

(2.3)    E1 ×X E2 --pr2--> E2
             |pr1            |π2
             E1   --π1-->    X,

where πi : Ei → X are copies of the map π, is equipped with a section given by the
diagonal map ∆ : E1 → E1 ×X E2. As the maps π1∗ and pr2∗ are injective (they are
split by their corresponding transfer maps), for the purpose of establishing identities
between the characteristic classes we have discussed it suffices to do so for bundles
which do have a section.
273
+ There is another description of ¯ε which is sometimes useful. Let pr1 : E ×X E →
274
+ E be as in (2.3), which is an oriented Wg-bundle with section given by the diagonal
275
+ map ∆, and so has the class κεe(pr1, ∆) defined.
276
+
277
279
Lemma 2.4. We have ¯ε = −(1/χ) κεe(pr1, ∆) ∈ Hn(E; H).

Proof. By Remark 2.3 we may suppose without loss of generality that π : E → X
has a section s : X → E, defining a class ε = εs ∈ Hn(E; H). Consider the
pullback square (2.3); let ei = (pri)∗(e) ∈ H2n(E1 ×X E2; Q) be the Euler class
of the vertical tangent bundle on the ith factor. Considering pr1 as a Wg-bundle
with section given by the diagonal map ∆, there is a class ε∆ ∈ Hn(E1 ×X E2; H)
defined. As both ε∆ and pr2∗(εs) restrict to coevaluation on the fibres of pr1, we
have ε∆ − pr2∗(εs) = pr1∗(x) for some class x ∈ Hn(E1; H). Pulling this equation
back along ∆ shows that x = −εs, so ε∆ = pr2∗(εs) − pr1∗(εs). Then we have
    κεe(pr1, ∆) = (pr1)!(ε∆ · e2)
                = (pr1)!((pr2∗(εs) − pr1∗(εs)) · e2)
                = (pr1)!(pr2∗(εs · e)) − (pr1)!(pr1∗(εs) · e2)
                = π1∗(π2)!(εs · e) − χ εs
                = π1∗κεe(π, s) − χ εs
                = −χ ¯ε
as required.
308
+
309
+ The intersection form of the fibres of π : E → X provides a map of local coeffi-
310
+ cient systems λ : H⊗H → Q; as we will often be concerned with applying it to two
311
+ factors of a tensor power H⊗k and will have to specify which factors we apply it to,
312
+ we will denote λ by λ1,2 and more generally write λi,j : H⊗k → H⊗k−2 for the map
313
+ that applies λ to the ith and jth factors. We call such operations contraction.
314
+ If p : E1 ×X E2 → X denotes the fibre product of two copies of π : E → X, and
315
+ if this has a section s : X → E, then in [KRW20b, Lemma 3.9] we have established
316
the formula
(2.4)    λ1,2(ε × ε) = ∆!(1) − 1 × v − v × 1 + p∗s∗e ∈ H2n(E1 ×X E2; Q),
where v = s!(1) ∈ H2n(E; Q) is the fibrewise Poincaré dual to the section s, cf.
[KRW20b, Lemma 3.1]. The analogue of this formula for ¯ε is as follows.

Lemma 2.5. We have
    λ1,2(¯ε × ¯ε) = ∆!(1) + (1/χ²) p∗κe² − (1/χ)(e × 1 + 1 × e) ∈ H2n(E1 ×X E2; Q).
326
Proof. As in Remark 2.3 we may suppose without loss of generality that π : E → X
has a section s : X → E, so that ε ∈ Hn(E; H) is defined.
By Lemma 2.2 we have ¯ε = ε − (1/χ) π∗κεe ∈ Hn(E; H), and so
    λ1,2(¯ε × ¯ε) = λ1,2((ε − (1/χ) π∗κεe) × (ε − (1/χ) π∗κεe))
                 = λ1,2(ε × ε) − λ1,2((1/χ) π∗κεe × ε)
                   − λ1,2(ε × (1/χ) π∗κεe) + λ1,2((1/χ) π∗κεe × (1/χ) π∗κεe).
The first term is given by (2.4), and using [KRW20b, Proposition 3.10] the last
term is given by
    λ1,2((1/χ) π∗κεe × (1/χ) π∗κεe) = (1/χ²) p∗λ1,2(κεe · κεe) = (1/χ²) p∗(κe² + (χ² − 2χ) s∗e).
For the middle two terms, note that
    ε × (1/χ) π∗κεe = (1/χ)(ε × 1) · p∗(κεe) = (1/χ)(ε · π∗κεe) × 1
so we need to calculate λ1,2(ε · π∗κεe) ∈ H2n(E; Q). The class ε · κεe is the fibre
integral along pr1 : E1 ×X E2 → E1 of ε × (ε · e) = (ε × ε) · (1 × e), so
    λ1,2(ε · κεe) = (pr1)!(λ1,2(ε × ε) · (1 × e))
                  = (pr1)!((∆!(1) − 1 × v − v × 1 + p∗s∗e) · (1 × e))
                  = e − π∗s∗e − χv + χπ∗s∗e
and hence
    λ1,2(ε × (1/χ) κεe) = (1/χ)(e − π∗s∗e − χv + χπ∗s∗e) × 1
                        = (1/χ) e × 1 + ((χ−1)/χ) p∗s∗e − v × 1
and similarly
    λ1,2((1/χ) κεe × ε) = (1/χ) 1 × e + ((χ−1)/χ) p∗s∗e − 1 × v.
Combining these gives
    λ1,2(¯ε × ¯ε) = ∆!(1) − 1 × v − v × 1 + p∗s∗e
                   + (1/χ²) p∗κe² + ((χ−2)/χ) p∗s∗e
                   − ((1/χ) e × 1 + ((χ−1)/χ) p∗s∗e − v × 1)
                   − ((1/χ) 1 × e + ((χ−1)/χ) p∗s∗e − 1 × v)
                 = ∆!(1) + (1/χ²) p∗κe² − (1/χ)(e × 1 + 1 × e)
as required.
390
+
391
+ If in addition we have a lift ℓ : E → B of the fibrewise Gauss map along some
392
+ fibration θ : B → BSO(2n) then for any c ∈ H∗(B; Q) we can define modified
393
twisted Miller–Morita–Mumford classes by the formula
    κ¯εac := π!(¯ε^a · ℓ∗c) ∈ H^{n(a−2)+|c|}(X; H⊗a).
Under the action of a permutation σ ∈ Sa of the tensor factors these classes
transform as sign(σ)^n, as ¯ε has degree n. Thus for any finite set S there is a
well-defined element
(2.5)    κ¯εSc := π!(¯ε^a · ℓ∗c) ∈ H^{n(a−2)+|c|}(X; H⊗S) ⊗ (det QS)⊗n.
To keep track of signs, for an ordered set S = {s1 < s2 < · · · < sa} we will often
write κ¯εs1,...,sa c ∈ H^{n(a−2)+|c|}(X; H⊗S) for the corresponding element, understand-
ing that if σ is a reordering of S then κ¯εσ(s1),...,σ(sa) c = sign(σ)^n κ¯εs1,...,sa c.
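To make the sign convention concrete, the following small Python sketch (helper names are illustrative, not from the paper) computes the factor sign(σ)^n relating the class indexed by a reordering of S to the class indexed by the original ordering.

from itertools import combinations

def permutation_sign(perm):
    # sign of a permutation given as a tuple of distinct indices 0..len-1
    inversions = sum(1 for i, j in combinations(range(len(perm)), 2) if perm[i] > perm[j])
    return -1 if inversions % 2 else 1

def reordering_factor(original, reordered, n):
    # factor sign(sigma)^n relating kappa_{bar-eps^{reordered}} to kappa_{bar-eps^{original}}
    sigma = tuple(original.index(s) for s in reordered)
    return permutation_sign(sigma) ** n

# swapping two of three tensor factors changes the class by (-1)^n
assert reordering_factor((1, 2, 3), (2, 1, 3), n=3) == -1
assert reordering_factor((1, 2, 3), (2, 1, 3), n=2) == 1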
404
+ Using Lemma 2.5 we immediately see that these characteristic classes satisfy the
405
+ following analogue of the contraction formula from [KRW20b, Proposition 3.10].
406
Proposition 2.6 (Modified contraction formula). In H∗(X; H⊗−) we have the
identities
    λ1,2(π!(¯ε1,2,...,a · ℓ∗c)) = ((χ−2)/χ) π!(¯ε3,4,...,a · ℓ∗(e · c)) + (1/χ²) κe² · π!(¯ε3,4,...,a · ℓ∗c)
and
    λa,a+1(π!(¯ε1,2,...,a · ℓ∗c) · π!(¯εa+1,...,a+b · ℓ∗c′)) = π!(¯ε1,...,a−1,a+2,...,a+b · ℓ∗(c · c′))
        + (1/χ²) κe² · π!(¯ε1,...,a−1 · ℓ∗c) · π!(¯εa+2,...,a+b · ℓ∗c′)
        − (1/χ) π!(¯ε1,...,a−1 · ℓ∗(e · c)) · π!(¯εa+2,...,a+b · ℓ∗c′)
        − (1/χ) π!(¯ε1,...,a−1 · ℓ∗c) · π!(¯εa+2,...,a+b · ℓ∗(e · c′)).
421
+ Similarly, from Lemma 2.2 we immediately obtain the following:
422
+
423
425
Proposition 2.7. If the bundle π : E → X has a section, so that the class ε and
hence κεSc is defined, then
    κ¯εSc = Σ_{I⊆S} κεIc · (−(1/χ) κεe)^{S\I} ∈ H∗(X; H⊗S) ⊗ (det QS)⊗n.
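The subset sum appearing in Proposition 2.7 can be expanded mechanically; the following Python sketch (purely illustrative bookkeeping, with made-up symbol names) lists the summands κεIc · Π_{j∈S\I}(−(1/χ)κ_{ε^j e}) for a small set S.

from itertools import combinations

def modified_class_terms(S, c="c"):
    """List the summands of Proposition 2.7 for kappa_{bar-eps^S c}.

    Each term is recorded as (power of -1/chi, subset I, symbolic factors), i.e.
    (-1/chi)^{|S\\I|} * kappa_{eps^I c} * prod_{j in S\\I} kappa_{eps^j e}.
    """
    S = tuple(S)
    terms = []
    for r in range(len(S) + 1):
        for I in combinations(S, r):
            complement = tuple(s for s in S if s not in I)
            factors = [f"kappa_eps^{I}_{c}"] + [f"kappa_eps^({j},)_e" for j in complement]
            terms.append((len(complement), I, factors))
    return terms

for power, I, factors in modified_class_terms((1, 2)):
    print(f"(-1/chi)^{power} *", " * ".join(factors))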
432
+ Let us give an example of using the modified contraction formula to evaluate an
433
+ expression.
434
Example 2.8. Consider the class λ1,5λ2,6λ3,4(κ¯ε1,2,3 · κ¯ε4,5,6). Then
    λ1,5λ2,6λ3,4(κ¯ε1,2,3 · κ¯ε4,5,6) = λ1,5λ2,6( κ¯ε1,2,5,6 + (1/χ²) κe² κ¯ε1,2 κ¯ε5,6
                                        − (1/χ)(κ¯ε1,2e κ¯ε5,6 + κ¯ε1,2 κ¯ε5,6e) ).
The first term is
    λ1,5λ2,6(κ¯ε1,2,5,6) = (−1)^n λ1,5λ2,6(κ¯ε1,5,2,6)
                         = (−1)^n λ2,6( ((χ−2)/χ) κ¯ε2,6e + (1/χ²) κe² κ¯ε2,6 )
                         = (−1)^n ((χ−2)/χ)( ((χ−2)/χ) κe² + (1/χ²) κe² χ ) + (−1)^n (1/χ²) κe² ( ((χ−2)/χ) χ )
                         = (−1)^n ( (χ−2)²/χ² + 2(χ−2)/χ² ) κe².
The second term is
    (1/χ²) κe² λ1,5λ2,6(κ¯ε1,2 κ¯ε5,6) = (−1)^n (1/χ²) κe² λ1,5λ2,6(κ¯ε1,2 κ¯ε6,5)
                                       = (−1)^n (1/χ²) κe² λ1,5(κ¯ε1,5)
                                       = (−1)^n ((χ−2)/χ²) κe².
The third term is
    −(1/χ) λ1,5λ2,6(κ¯ε1,2e κ¯ε5,6) = (−1)^{n+1} (1/χ) λ1,5λ2,6(κ¯ε1,2e κ¯ε6,5)
        = (−1)^{n+1} (1/χ) λ1,5( κ¯ε1,5e − (1/χ) κ¯ε1e κ¯ε5e )
        = (−1)^{n+1} (1/χ) [ ( ((χ−2)/χ) κe² + (1/χ²) κe² χ )
                             − (1/χ)( κe² + (1/χ²) κe² χ² − (1/χ)(2χ κe²) ) ]
        = (−1)^{n+1} ( (χ−2)/χ² + 1/χ² − 1/χ² − 1/χ² + 2/χ² ) κe²
        = (−1)^{n+1} ((χ−1)/χ²) κe²
and the fourth term is the same as the third by the evident symmetry.
In total we have
    λ1,5λ2,6λ3,4(κ¯ε1,2,3 · κ¯ε4,5,6) = (−1)^n ( (χ−2)²/χ² + 2(χ−2)/χ² + (χ−2)/χ² − 2(χ−1)/χ² ) κe²
                                      = (−1)^n ((χ−3)/χ) κe².
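The χ-coefficients collected at the end of Example 2.8 can be verified mechanically; here is a short sympy check of that final arithmetic (an illustration only, not part of the argument).

import sympy as sp

chi = sp.symbols('chi')

first  = (chi - 2)**2 / chi**2 + 2*(chi - 2) / chi**2   # coefficient from the first term
second = (chi - 2) / chi**2                              # from the second term
third  = -(chi - 1) / chi**2                             # from the third term (sign included)
fourth = third                                           # the fourth term equals the third

total = sp.simplify(first + second + third + fourth)
assert sp.simplify(total - (chi - 3) / chi) == 0
print(total)  # (chi - 3)/chi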
511
+
512
+ 2.3. Graphical interpretation. In [KRW20b, Section 5] it was found to be very
513
+ convenient to adopt a graphical formalism where κεac corresponds to a vertex with
514
+ a half-edges incident to it and a formal label c, a product of κεac’s corresponds to
515
+ a disjoint union of such vertices, and applying the contraction λi,j corresponds to
516
+ pairing up the half-edges labelled i and j.
517
+ It will be convenient to adopt a similar formalism here. Let S be a finite set, and
518
+ V be a graded Q-algebra with a distinguished element e ∈ V2n. Slightly modifying2
519
+ 2The difference is that we allow labelled vertices whose contribution to the degree is 0.
520
+
521
523
+ the definition from [KRW20b, Proof of Theorem 5.1], a marked oriented graph with
524
+ legs S and labelled by V consists of the following data:
525
+ (i) a totally ordered finite set ⃗V (of vertices), a totally ordered finite set ⃗H (of
526
+ half-edges), and a monotone function a: ⃗H → ⃗V (encoding that a half-edge
527
+ h is incident to the vertex a(h)),
528
+ (ii) an ordered matching m = {(ai, bi)}i∈I of the set H ⊔ S (encoding the
529
+ oriented edges of the graph),
530
(iii) a function c : V⃗ → V with homogeneous values, such that |c(v)| + n(|a^{−1}(v)| − 2) ≥ 0.
Marked oriented graphs Γ = (V⃗, H⃗, a, m, c) and Γ′ = (V⃗′, H⃗′, a′, m′, c′) with the
same set of legs S are isomorphic if there are order-preserving bijections V⃗ → V⃗′
and H⃗ → H⃗′ which intertwine a and a′, intertwine c and c′, and send m to m′. An
oriented graph is an isomorphism class [Γ] of marked oriented graph. We assign to
a marked oriented graph Γ = (V⃗, H⃗, a, m, c) the degree
    deg(Γ) := Σ_{v∈V} ( |c(v)| + n(|a^{−1}(v)| − 2) ) = n(|H| − 2|V|) + Σ_{v∈V} |c(v)|.
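As a sanity check on the degree formula, here is a tiny Python sketch (with a data layout chosen for illustration, not taken from the paper) that computes deg(Γ) from the incidence map a and the degrees of the labels, and confirms the two displayed expressions agree.

def graph_degree(vertices, incidence, label_degree, n):
    """deg(Gamma) = sum over vertices of |c(v)| + n(|a^{-1}(v)| - 2).

    vertices: iterable of vertex names
    incidence: dict half-edge -> vertex (the map a)
    label_degree: dict vertex -> degree |c(v)| of its label
    """
    valence = {v: 0 for v in vertices}
    for h, v in incidence.items():
        valence[v] += 1
    per_vertex = sum(label_degree[v] + n * (valence[v] - 2) for v in vertices)
    global_form = n * (len(incidence) - 2 * len(list(vertices))) + sum(label_degree.values())
    assert per_vertex == global_form
    return per_vertex

# theta-graph-like example: two trivalent vertices with trivial labels, n = 2
print(graph_degree(
    vertices=["v1", "v2"],
    incidence={"h1": "v1", "h2": "v1", "h3": "v1", "h4": "v2", "h5": "v2", "h6": "v2"},
    label_degree={"v1": 0, "v2": 0},
    n=2,
))  # 2 * (6 - 4) = 4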
551
+ Let π : E → X be an oriented Wg-bundle with a lift ℓ : E → B of the map clas-
552
+ sifying the vertical tangent bundle along θ : B → BSO(2n), and let V := H∗(B; Q)
553
+ and e := θ∗e ∈ V2n. Then given a marked oriented graph Γ = (⃗V , ⃗H, a, m, c) with
554
+ legs S we form a class
555
    ¯κ(Γ) ∈ H^{deg(Γ)}(X; H⊗S)
by the following recipe. Firstly, we may form
(2.6)    Π_{v∈V} κ¯ε^{a^{−1}(v)} c(v) ∈ H∗(X; H⊗H),
where we have used the ordering on V⃗ to order the product, and the ordering on
H⃗ to trivialise the factor of (det QH)⊗n = (⊗_{v∈V} det Q^{a^{−1}(v)})⊗n that arises from
(2.5). Secondly, taking two copies S1 and S2 of the set S and writing si ∈ Si for
the element corresponding to s ∈ S we can form
(2.7)    Π_{s∈S} κ¯εs1,s2 ∈ H∗(X; H⊗(S1⊔S2)).
As each κ¯εs1,s2 has degree 0, the product does not depend on how the factors are
ordered. Taking the product of (2.6) and (2.7) and then applying λx,y for each
ordered pair (x, y) in the matching m on H ⊔ S = H ⊔ S1 gives the required class
    ¯κ(Γ) ∈ H∗(X; H⊗S2) = H∗(X; H⊗S).
574
+ Example 2.9. In this graphical interpretation we recognise the class evaluated
575
+ in Example 2.8 as that associated to the theta-graph with a certain ordering and
576
+ orientation.
577
+ Clearly ¯κ(Γ) only depends on the underlying oriented graph [Γ]. We now describe
578
+ how it transforms when the orderings on V , H, and the pairs m are changed, without
579
+ changing the underlying labelled graph. If Γ′ = (⃗V ′, ⃗H′, a′, m′, c′) is another marked
580
+ oriented graph and there are bijections f : H → H′ and g : V → V ′ intertwining
581
+ a and a′ and c and c′ and such that under these bijections the matching m′ differs
582
+ from m by reversing the order of k pairs, then
583
    ¯κ(Γ′) = (−1)^{nk} sign(f) sign(g) · ¯κ(Γ)
584
+ for certain signs described on [KRW20b, pp. 55-56].
585
+ Graphs considered as representing ¯κ’s behave differently to those representing κ’s
586
+ described in [KRW20b, Section 5]. To distinguish them we will depict the graphs
587
+
588
590
+ representing κ’s in red, as we did in that paper, and the graphs representing ¯κ’s in
591
+ blue. The contraction formula of [KRW20b, Proposition 3.10] was interpreted in
592
+ [KRW20b, Section 5] as giving relations among red graphs which yield equivalent
593
+ κ-classes. In the generality of a smooth oriented Wg-bundle π : E → X with section
594
+ s : X → E these may be depicted as follows:
595
[Figure 1. The contraction formula, displayed graphically: contracting a self-edge at a
vertex labelled c gives the vertex labelled ce, plus s∗e times the vertex labelled c, minus
2s∗c; contracting an edge joining vertices labelled c and c′ gives the vertex labelled cc′,
plus s∗e times the disjoint vertices labelled c and c′, minus s∗c times the vertex labelled
c′, minus s∗c′ times the vertex labelled c.]
613
+ Here the negative terms only arise when they make sense, i.e. when the vertex
614
+ has valence 2 in the first case, when the vertex labelled c has valence 1 in the second
615
+ case, and when the vertex labelled c′ has valence 1 in the third case.
616
+ Convention. In these and the following figures, to avoid clutter we have adopted the
617
+ following ordering conventions: vertices are numbered starting from 1 from left to
618
+ right, half-edges around each vertex are ordered clockwise starting from the marked
619
+ half-edge, and edges are oriented from the smaller half-edge to the larger one.
620
+ Similarly, the modified contraction formula of Proposition 2.6 can be interpreted
621
+ as giving the following relations among blue graphs which yield equivalent ¯κ-classes:
622
[Figure 2. The modified contraction formula, displayed graphically: contracting a
self-edge at a vertex labelled c gives ((χ−2)/χ) times the vertex labelled ce, plus (1/χ²)
times the vertex labelled c together with a 0-valent vertex labelled e²; contracting an
edge joining vertices labelled c and c′ gives the vertex labelled cc′, plus (1/χ²) times
the vertices labelled c, c′ and e², minus (1/χ) times (the vertices labelled ce and c′,
plus the vertices labelled c and c′e).]
650
+ 3. Twisted cohomology of diffeomorphism groups
651
+ The main goal of this section is to describe the twisted cohomology groups
652
+ H∗(BDiff+(Wg); H⊗S) and H∗(BDiff+(Wg, ∗); H⊗S)
653
+ in a stable range of degrees, of the classifying space BDiff+(Wg) of the group of
654
+ orientation-preserving diffeomorphisms of Wg (which classifies oriented Wg-bundles),
655
+ and the classifying space BDiff+(Wg, ∗) of the group of orientation-preserving dif-
656
+ feomorphisms of Wg which fix a point ∗ ∈ Wg (which classifies oriented Wg-bundles
657
+ with section). In [KRW20b, Theorem 3.15] the analogous calculation was given for
658
+
659
661
+ the classifying space BDiff(Wg, D2n) of the group of diffeomorphisms of Wg which
662
+ fix a disc D2n ⊂ Wg.
663
+ In order to do this we will also discuss the manifolds Wg equipped with θ-
664
+ structures for the tangential structure θ : BSO(2n)⟨n⟩ → BO(2n), i.e. the n-
665
+ connected cover of BO(2n). In this case we will consider the homotopy quotients
666
+ BDiffθ(Wg) := Bun(T Wg, θ∗γ2n)//Diff(Wg)
667
+ BDiffθ(Wg, ∗) := Bun(T Wg, θ∗γ2n)//Diff(Wg, ∗)
668
+ where Bun(T Wg, θ∗γ2n) denotes the space of vector bundle maps T Wg → θ∗γ2n
669
+ from the tangent bundle of Wg to the bundle classified by θ. The group Diff(Wg)
670
+ acts on the space of bundle maps by precomposing with the derivative.
671
There is a factorisation θ : BSO(2n)⟨n⟩ --θor--> BSO(2n) --σ--> BO(2n), and by ob-
675
+ struction theory one sees that the space Bun(T Wg, σ∗γ2n) has two contractible
676
+ path components corresponding to the two orientations of Wg. In particular there
677
+ are equivalences
678
+ Bun(T Wg, σ∗γ2n)//Diff(Wg) ≃ BDiff+(Wg)
679
+ Bun(T Wg, σ∗γ2n)//Diff(Wg, ∗) ≃ BDiff+(Wg, ∗)
680
+ and so θor induces maps
681
+ BDiffθ(Wg) −→ BDiff+(Wg)
682
+ and
683
+ BDiffθ(Wg, ∗) −→ BDiff+(Wg, ∗).
684
+ It is shown in [GRW19, Section 5.2] that these are principal SO[0, n − 1]-fibrations.
685
+ In particular the spaces BDiffθ(Wg) and BDiffθ(Wg, ∗) are path-connected.
686
+ 3.1. Spaces of graphs. Our description of the twisted cohomology groups of
687
+ BDiff+(Wg), BDiff+(Wg, ∗), BDiff(Wg, D2n), BDiffθ(Wg) and BDiffθ(Wg, ∗) in a
688
+ stable range will be—via the graphical interpretation given in Section 2.3—in terms
689
+ of graded vector spaces of labelled graphs, modulo certain relations. (Readers of
690
+ [KRW20b] may have been expecting vector spaces of labelled partitions instead:
691
+ here we have found spaces of graphs more convenient for formulating results, cf.
692
+ Remark 3.2, though spaces of labelled partitions will still play a role in the proofs.)
693
+ To describe these spaces of graphs we will use the graded Q-algebras
694
V := H∗(BSO(2n)⟨n⟩; Q) = Q[p⌈(n+1)/4⌉, . . . , pn−1, e]
697
+ W := H∗(BSO(2n); Q) = Q[p1, . . . , pn−1, e]
698
+ with distinguished elements e of degree 2n given by the Euler class. In order to work
699
+ in a way which is agnostic about the genus g of the manifold Wg under consideration,
700
+ we will work over the ring Q[χ±1] instead of Q, where χ is an invertible formal
701
+ parameter which will—later—be set to the Euler characteristic 2 + (−1)n2g of Wg.
702
Definition 3.1.
(i) Let
        Graph1(S) := Q[χ±1]{Γ oriented graph with legs S, labelled in V}/∼
    where ∼
    (a) imposes the sign rule for changing orderings of vertices and half-edges and
        for reversing orientations of edges;
    (b) imposes linearity in the labels, and sets a graph containing an a-valent
        vertex labelled by c with |c| + n(a − 1) < 0 to zero;
    (c) sets the 0-valent vertex labelled by e ∈ V2n equal to χ, and if 2n ≡ 0
        mod 4 sets the 0-valent vertex labelled by pn/2 ∈ V2n equal to 0;
    (d) imposes the contraction relations³: contracting a self-edge at a vertex
        labelled c gives the vertex labelled ce, minus 2c; contracting an edge joining
        vertices labelled c and c′ gives the vertex labelled cc′, minus c times the
        vertex labelled c′, minus c′ times the vertex labelled c; here the negative
        terms only arise when they make sense, i.e. in the first case when the vertex
        has valence 2 and its label c is a scalar multiple of 1 ∈ V0, in the second
        case when c is a scalar multiple of 1 ∈ V0 and has valence 1, and similarly
        in the third case.
(ii) Let
        Graphθ∗(S) := Q[χ±1]{Γ oriented graph with legs S, labelled in V} ⊗ V/∼
    where ∼ imposes (a)–(c) as well as
    (d′) imposes the contraction relations of Figure 1.
(iii) Let
        Graph∗(S) := Q[χ±1]{Γ oriented graph with legs S, labelled in W} ⊗ W/∼
    where ∼ imposes (a) and (b), as well as
    (c′′) sets the 0-valent vertex labelled by e ∈ W2n equal to χ, sets the 0-valent
        vertex labelled by any degree 2n monomial in Pontrjagin classes equal to 0,
        and for any 1 ≤ i ≤ ⌊n/4⌋ sets a vertex labelled by c·pi equal to (1/χ) times
        that vertex labelled by c, tensored with pi,
    and (d′).
(iv) Let
        Graphθ(S) := Q[χ±1]{Γ oriented graph with legs S, labelled in V}/∼
    where ∼ imposes (a) and (b), as well as
    (c′′′) sets the 0-valent vertex labelled by e ∈ V2n equal to χ, if 2n ≡ 0 mod 4
        sets the 0-valent vertex labelled by pn/2 ∈ V2n equal to 0, and sets the
        1-valent vertex labelled by e ∈ V2n equal to 0,
    (d′′′) imposes the contraction relations of Figure 2.
(v) Let
        Graph(S) := Q[χ±1]{Γ oriented graph with legs S, labelled in W}/∼
    where ∼ imposes (a), (b), as well as
    (c′′′′) sets the 0-valent vertex labelled by e ∈ W2n equal to χ, sets the 0-valent
        vertex labelled by any degree 2n monomial in Pontrjagin classes equal to 0,
        sets the 1-valent vertex labelled by e ∈ W2n equal to 0, and for any
        1 ≤ i ≤ ⌊n/4⌋ sets a vertex labelled by c·pi equal to (1/χ) times that vertex
        labelled by c, times the 0-valent vertex labelled by e·pi,
    and (d′′′).

³These are the relations from Figure 1 when s∗ kills all positive-degree classes.
779
+ Remark 3.2 (Graphs and partitions). In all cases one can apply the (modified)
780
+ contraction formula to pass from a graph to a sum of graphs with strictly fewer
781
+ edges, and so by iterating to a sum of graphs with no edges. These are disjoint
782
+ unions of labelled corollas, and so correspond to partitions of S with labels in V or
783
+ W, plus additional external labels in cases (ii) and (iii). There are two issues with
784
+ this. The first is that in cases (iv) and (v) it is not clear that the resulting sum of
785
+ disjoint unions of labelled corollas is unique, as one has to choose an order in which
786
+ to eliminate edges. The second is that even if it is, then the functoriality on the
787
+ Brauer category which we describe below would involve gluing in edges and then
788
+ eliminating them, leading to a complicated formula. This is why we have found it
789
+ convenient to work with spaces of graphs.
790
+ We wish to consider each of the above as defining functors on the (signed) Brauer
791
+ category as in [KRW20b, Section 2.3], but to take into account the parameter χ we
792
+ must slightly generalise to a Q[χ]-linear version of the (signed) Brauer category.
793
+ Definition 3.3. For finite sets S and T let preBrχ(S, T ) be the free Q[χ]-module
794
+ on tuples (f, mS, mT ) of a bijection f from a subset S◦ ⊂ S to a subset T ◦ ⊂ T ,
795
+ an ordered matching mS of S \ S◦, and an ordered matching mT of T \ T ◦.
796
Let Brχ(S, T) be the quotient of preBrχ(S, T) by the span of (f, mS, mT) − (f, m′S, m′T)
whenever mS agrees with m′S after reversing some pairs, and mT agrees with m′T after
reversing some pairs.
Let sBrχ(S, T) be the quotient of preBrχ(S, T) by the span of (f, mS, mT) − (−1)^{kl}(f, m′S, m′T)
whenever mS agrees with m′S after reversing k pairs, and mT agrees with m′T after
reversing l pairs.
810
+ Let (s)Brχ be the Q[χ]-linear category whose objects are finite sets, and whose
811
+ morphisms are the Q[χ]-modules (s)Brχ(S, T ) defined above. In the case of Brχ
812
+ we think of [f, mS, mT ] as representing 1-dimensional cobordisms with no closed
813
+ components: then the composition law is given by composing cobordisms and then
814
+ replacing each closed 1-manifold by a factor of χ − 2. In the case of sBrχ we think
815
+ of (f, mS, mT ) as representing oriented 1-dimensional cobordisms with no closed
816
+ components: then the composition law is given by composing cobordisms and then
817
+ replacing each compatibly oriented closed 1-manifold by a factor of −(χ − 2).
818
+ Let d(s)Brχ denote the subcategories having all objects and morphisms spanned
819
+ by [f, mS, mT ] with T ◦ = ∅.
820
+ For a central charge d ∈ Q let (d)(s)Brd denote the Q-linear category obtained
821
+ by specialising the Q[χ]-linear category (d)(s)Brχ to χ = 2+d for (d)Br or χ = 2−d
822
+ for (d)sBr. (This notation then agrees with [KRW20b, Definition 2.14, 2.19].)
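To illustrate the composition law, the following Python sketch (data conventions are mine, for illustration only) composes two morphisms of Brχ presented as pair-lists and counts the closed circles created, each of which is to be replaced by a factor of χ − 2.

def compose_brauer(pairs_ST, pairs_TU, S, T, U):
    """Compose 1-dimensional cobordisms S -> T and T -> U given as pair-lists.

    Elements of T are shared; each closed circle produced by the composition is
    removed and counted (to be replaced by a factor of chi - 2, as in Br_chi).
    Returns (pairs_SU, number_of_closed_circles).
    """
    adj = {}
    for a, b in list(pairs_ST) + list(pairs_TU):
        adj.setdefault(a, []).append(b)
        adj.setdefault(b, []).append(a)

    visited, new_pairs, circles = set(), [], 0
    boundary = set(S) | set(U)
    for start in list(adj):
        if start in visited or start not in boundary:
            continue
        # walk the arc from one boundary point until we reach another boundary point
        prev, cur = None, start
        while True:
            visited.add(cur)
            nxt = [x for x in adj[cur] if x != prev] or [adj[cur][0]]
            prev, cur = cur, nxt[0]
            if cur in boundary:
                visited.add(cur)
                new_pairs.append((start, cur))
                break
    # whatever is left unvisited consists of closed circles supported on T
    seen = set()
    for x in [y for y in adj if y not in visited]:
        if x in seen:
            continue
        prev, cur = None, x
        while cur not in seen:
            seen.add(cur)
            nxt = [y for y in adj[cur] if y != prev] or [adj[cur][0]]
            prev, cur = cur, nxt[0]
        circles += 1
    return new_pairs, circles

# a cap (∅ -> T) followed by a cup (T -> ∅) closes up into one circle
print(compose_brauer([("t1", "t2")], [("t1", "t2")], S=[], T=["t1", "t2"], U=[]))  # ([], 1)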
823
+ We consider the spaces of graphs above as defining Q[χ]-linear functors
824
Graph1(−), Graphθ∗(−), Graph∗(−), Graphθ(−), Graph(−) : (s)Brχ → Gr(Q[χ±1]-mod)
826
+ in the evident way, by gluing of oriented graphs (after orientations have been ar-
827
+ ranged to be compatible). We endow them with a lax symmetric monoidality by
828
+ disjoint union of graphs. We write Graph1(−)g : (s)Br2g → Gr(Q-mod) and so on
829
+ for their specialisations at χ = 2 + (−1)n2g (defined for (n, g) ̸= (odd, 1)).
830
+
831
833
+ 3.2. The isomorphism theorem. Theorem 3.4 below extends [KRW20b, Theo-
834
+ rem 3.15] to BDiffθ(Wg, ∗), BDiff+(Wg, ∗), BDiffθ(Wg), and BDiff+(Wg).
835
+ To formulate it we first observe that when π : E → X is a smooth oriented
836
+ Wg-bundle and H is the local coefficient system over X given by the fibrewise nth
837
+ homology of this bundle, the fibrewise intersection form λ : H⊗H → Q and its dual
838
+ ω : Q → H ⊗ H are (−1)n-symmetric and satisfy λ ◦ ω = (−1)n2g · Id, so provide a
839
+ Q-linear functor S �→ H⊗S from (s)Br2g to the category of local coefficient systems
840
+ of Q-modules over X. (Strictly speaking our definitions require χ = 2 + (−1)n2g to
841
+ be invertible, so we omit the case (n, g) = (odd, 1).) Composing this with taking
842
+ cohomology gives a functor
843
+ H∗(X; H⊗−) : (s)Br2g −→ Gr(Q-mod)
844
S ⟼ H∗(X; H⊗S).
845
+ The relations in the various spaces of graphs defined in Section 3.1 were chosen
846
+ precisely to match the contraction formula of [KRW20b, Proposition 3.10] (in the
847
+ case of Graph1) and the modified contraction formula of Proposition 2.6 (in the
848
+ other cases), so that assigning to a graph its associated κ- or ¯κ-class provides
849
+ natural transformations
850
+ (i) κ : Graph1(−)g → H∗(BDiff(Wg, D2n); H⊗−),
851
(ii) κ : Graphθ∗(−)g → H∗(BDiffθ(Wg, ∗); H⊗−),
853
+ (iii) κ : Graph∗(−)g → H∗(BDiff+(Wg, ∗); H⊗−),
854
+ (iv) ¯κ : Graphθ(−)g → H∗(BDiffθ(Wg); H⊗−),
855
+ (v) ¯κ : Graph(−)g → H∗(BDiff+(Wg); H⊗−),
856
+ of functors (s)Br2g → Gr(Q-mod).
857
+ Theorem 3.4. For 2n = 2 or 2n ≥ 6 the maps (i)–(v) are isomorphisms in a
858
+ range of cohomological degrees tending to infinity with g.
859
+ We will first give the proof in cases (i), (ii), (iii), and in case (v) assuming case
860
+ (iv); the much more involved case (iv) will be treated afterwards.
861
+ Proof of Theorem 3.4 (i), (ii), (iii), (v). For case (i) observe that Graph1(−)g is
862
+ naturally isomorphic to the functor G(−, V) from [KRW20b, Proof of Theorem 5.1],
863
+ which is shown there to be isomorphic to the functor P(−, V)≥0 ⊗ det⊗n. This case
864
+ then follows from [KRW20b, Theorem 3.15].
865
+ For case (ii) we first construct the homotopy fibre sequence
866
+ (3.1)
867
+ BDiff(Wg, D2n) −→ BDiffθ(Wg, ∗) −→ BSO(2n)⟨n⟩.
868
+ The left-hand term may be written as the homotopy quotient of Diff(Wg, ∗) acting
869
+ on the Stiefel manifold Fr(T∗Wg) given by the space of frames in the tangent space to
870
+ Wg at the point ∗ ∈ Wg, as this action is transitive and its stabiliser is the subgroup
871
+ which fixes a point and its tangent space, which is homotopy equivalent to fixing a
872
+ disc. The middle term was defined as the homotopy quotient of Diff(Wg, ∗) acting
873
+ on Bun(T Wg, θ∗γ2n). Evaluation at ∗ ∈ Wg defines a Diff(Wg, ∗)-invariant map
874
+ ev : Bun(T Wg, θ∗γ2n) −→ BSO(2n)⟨n⟩,
875
+ which is a fibration.
876
If we choose a point x ∈ BSO(2n)⟨n⟩ and a framing ξ : (θ∗γ2n)x ≅ R2n, then there is
a map ξ∗ : ev−1(x) → Fr(T∗Wg) given by sending a
880
+ bundle map ˆℓ : T Wg → θ∗γ2n whose underlying map sends ∗ to x to the framing
881
+ ξ ◦ ˆℓx : T∗Wg → (θ∗γ2n)x → R2n. One verifies by obstruction theory that ξ∗ :
882
+ ev−1(x) → Fr(T∗Wg) is a weak equivalence. Taking homotopy orbits for Diff(Wg, ∗)
883
+ then gives the required homotopy fibre sequence.
884
+
885
887
+ As H∗(BDiff(Wg, D2n); H⊗S) is spanned by products of twisted Miller–Morita–
888
+ Mumford classes κεac with c ∈ V in a stable range by (i), and these classes may be
889
+ defined on BDiffθ(Wg, ∗), the Serre spectral sequence
890
+ H∗(BSO(2n)⟨n⟩; Q) ⊗ H∗(BDiff(Wg, D2n); H⊗S) ⇒ H∗(BDiffθ(Wg, ∗); H⊗S)
891
+ for the homotopy fibre sequence (3.1) collapses in a stable range. The result then
892
+ follows by observing that the analogue of the Serre filtration of Graphθ
893
+ ∗(−)g, induced
894
+ by the descending filtration by degrees of H∗(BSO(2n)⟨n⟩; Q) = V, has
895
+ gr(Graphθ
896
+ ∗(−)g) ∼= V ⊗ Graph1(−)g,
897
+ because modulo V>0 the formula of (d′) specialises to that of (d). The induced map
898
+ gr(κ) : gr(Graphθ
899
+ ∗(−)g) −→ gr(H∗(BDiffθ(Wg, ∗); H⊗−))
900
+ therefore has the form V ⊗ {the map κ in case (i)} so is an isomorphism in a stable
901
+ range by case (i). Case (ii) follows.
902
+ Case (iii) is just like the above, using the homotopy fibre sequence
903
+ BDiff(Wg, D2n) −→ BDiff+(Wg, ∗) −→ BSO(2n)
904
+ instead, which is established in the analogous way, and W in place of V.
905
+ Case (v) can be deduced from case (iv) by applying the same method to the
906
+ homotopy fibre sequence
907
+ BDiffθ(Wg) −→ BDiff+(Wg)
908
+ ξ
909
+ −→ BSO[0, n]
910
+ established in [GRW19, Section 5.2]. The filtration step is a little different, so we
911
+ give some details. It follows from (iv) that H∗(BDiffθ(Wg); H⊗S) is spanned by
912
+ products of twisted Miller–Morita–Mumford classes κ¯εac with c ∈ V in a stable
913
+ range, and these may be defined on BDiff+(Wg) (in fact they may be defined even
914
+ for c ∈ W) so the corresponding Serre spectral sequence
915
+ H∗(BSO[0, n]; Q) ⊗ H∗(BDiffθ(Wg); H⊗S) ⇒ H∗(BDiff+(Wg); H⊗S)
916
+ degenerates in a stable range. In this case the analogue of the Serre filtration on
917
+ Graph(−)g is induced by giving the graph Υi := ({0}, ∅, ∅ → {0}, ∅, c(0) = epi)
918
+ filtration 4i for 1 ≤ i ≤ ⌊n/4⌋, giving all other connected graphs filtration 0, and
919
+ extending multiplicatively. The associated graded of this filtration has the form
920
+ gr(Graph(−)g) ∼= Q[Υ1, Υ2, . . . , Υ⌊n/4⌋] ⊗ Graphθ(−)g,
921
+ because the relation in (c′′′′) shows that any graph with a vertex labelled cpi for
922
+ 1 ≤ i ≤ ⌊n/4⌋ is equivalent to a graph of strictly larger filtration, unless the vertex
923
+ is 0-valent and the label is epi. As ¯κ(Υi) = κepi = χ · ξ∗(pi) by [GRW19, Remark
924
+ 5.5] it follows that the induced map
925
+ gr(¯κ) : gr(Graph(−)g) −→ gr(H∗(BDiff+(Wg); H⊗S))
926
+ has the form {an isomorphism} ⊗ {the map ¯κ in case (iv)} so is an isomorphism in
927
+ a stable range by case (iv).
928
+
929
+ 3.3. Proof of Theorem 3.4 (iv). The proof of Theorem 3.4 (iv) is of a less formal
930
+ nature. It will be parallel to that of [KRW20b, Theorem 3.15], but algebraically
931
+ more complicated.
932
+ An important tool will be the following lemma, inspired by
933
+ [Qui71, p. 566].
934
+ Lemma 3.5. Let G be a topological group and p : P → X be a principal G-bundle
935
+ with action a : G×P → P, which satisfies the Leray–Hirsch property in cohomology
936
+ over a field F. Then
937
    H∗(X; F) --p∗--> H∗(P; F) ⇉ H∗(G; F) ⊗F H∗(P; F)
(the parallel maps being a∗ and 1 ⊗ Id) is an equaliser diagram.
947
+ Proof. Let us leave F implicit.
948
+ By the Leray–Hirsch property H∗(P) is a free
949
+ H∗(X)-module and hence is faithfully flat. Thus it suffices to prove that the dia-
950
+ gram is an equaliser diagram after applying −⊗H∗(X) H∗(P). By the Leray–Hirsch
951
property we also have H∗(P) ⊗H∗(X) H∗(P) ≅ H∗(P ×X P). Thus it suffices to
show that
    H∗(P) --pr2∗--> H∗(P ×X P) ⇉ H∗(G) ⊗ H∗(P ×X P)
(the parallel maps being a∗ and 1 ⊗ Id) is an equaliser diagram, which is the same
question for the principal G-bundle pr2 : P ×X P → P. But this principal G-bundle
has a section given by the diagonal map, which trivialises it: this trivialisation
identifies the diagram with
    H∗(P) --1⊗Id--> H∗(G) ⊗ H∗(P) ⇉ H∗(G) ⊗ H∗(G) ⊗ H∗(P)
(the parallel maps being µ∗ ⊗ Id and 1 ⊗ Id), which is indeed an equaliser diagram
as it has a contraction induced by a∗.
972
+
973
+ We adapt the proof of [KRW20b, Theorem 3.15], supposing for concreteness that
974
+ n is odd. Consider the tangential structure θ × Y : BSO(2n)⟨n⟩ × Y → BSO(2n)
975
+ with Y = K(W ∨, n + 1) and W a generic rational vector space. Then we have
976
H∗(Y ; Q) ≅ Sym∗(W[n + 1]), the symmetric algebra on the vector space W placed
977
+ in (even) degree n + 1. If n is even then like at the end of the proof of [KRW20b,
978
+ Theorem 3.15] we would take Y = K(W ∨, n + 2) instead, so H∗(Y ; Q) would still
979
+ be a symmetric algebra. Apart from this there is no essential difference, and we
980
+ will not comment further on the differences in the case n even.
981
+ There are associated universal Wg-bundles
982
+ π : Eθ −→ BDiffθ(Wg)
983
+ πY : Eθ×Y −→ BDiffθ×Y (Wg)
984
+ and an evaluation map ℓ : Eθ×Y → Y . Neglecting the “maps to Y ” part of the
985
+ tangential structure gives a homotopy fibre sequence
986
+ map(Wg, Y ) −→ BDiffθ×Y (Wg) −→ BDiffθ(Wg).
987
+ We can take Y to be a topological abelian group, which then acts fibrewise on the
988
map θ × Y and hence acts compatibly on Eθ×Y and BDiffθ×Y (Wg). Using this we
989
+ can form the homotopy fibre sequence
990
+ map(Wg, Y )//Y −→ BDiffθ×Y (Wg)//Y −→ BDiffθ(Wg).
991
+ The space map(Wg, Y )//Y is a K(Hn(Wg; Q)⊗W ∨, 1), so there is an identification
992
+ of graded local coefficient systems
993
+ H∗(map(Wg, Y )//Y ; Q) = Λ∗(H ⊗ W[1]).
994
+ This is natural in the vector space W, and scaling by u ∈ Q× acts on Λk(H⊗ W[1])
995
+ by uk. It follows that it acts this way on the kth row of the Serre spectral sequence
996
    E_2^{p,q} = H^p(BDiffθ(Wg); Λ^q(H ⊗ W[1])) ⇒ H^{p+q}(BDiffθ×Y (Wg)//Y ; Q).
999
+ As the differentials in this spectral sequence must be equivariant for this Q×-action,
1000
+ it follows that they must all be trivial.
1001
+ Furthermore this action gives a weight
1002
+ decomposition of both sides, which identifies
1003
+ H∗(BDiffθ(Wg); Λk(H ⊗ W)) ∼= H∗+k(BDiffθ×Y (Wg)//Y ; Q)(k),
1004
+ the weight k-subspace.
1005
+ To access the latter groups, we use that there is a map
1006
    α : BDiffθ×Y (Wg) −→ Ω∞_0(MTθ ∧ Y+)
which by the main theorems of [Bol12, RW16, GTMW09] (for 2n = 2) and [GRW18,
GRW14, GRW17] (for 2n ≥ 6) is an isomorphism on cohomology in a stable
1013
+ range of degrees. Here MTθ is the Thom spectrum of −θ∗γ2n, so writing u−2n ∈
1014
+ H−2n(MTθ; Q) for its Thom class, by the Thom isomorphism we have
1015
    H∗(MTθ; Q) ≅ u−2n · H∗(BSO(2n)⟨n⟩; Q) = u−2n · Q[p⌈(n+1)/4⌉, . . . , pn−1, e].
1018
The rational cohomology of Ω∞_0(MTθ ∧ Y+) is then given by
    Sym∗([H∗(MTθ; Q) ⊗ Sym∗(W[n + 1])]>0),
which can be considered as the free (graded-)commutative algebra on the even-
degree classes κc,w1···wr with c ∈ Q[p⌈(n+1)/4⌉, . . . , pn−1, e] and wi ∈ W, modulo lin-
earity in c and in the wi, and modulo commutativity of the wi. The pullbacks
of these classes along α we again denote κc,w1···wr, and they may be described
intrinsically as the fibre integrals πY!(c(TπY Eθ×Y ) · ℓ∗(w1 · · · wr)).
1029
Lemma 3.6. There are unique classes ¯κc,w1···wr ∈ H∗(BDiffθ×Y (Wg)//Y ; Q) which
pull back to
    Σ_{I⊔J={1,2,...,r}} κc,wI · Π_{j∈J} (−(1/χ) κe,wj) ∈ H∗(BDiffθ×Y (Wg); Q),
and in a stable range of degrees H∗(BDiffθ×Y (Wg)//Y ; Q) is the free graded-commutative
algebra on the classes ¯κc,w1···wr, modulo linearity in c and in the wi, commutativity
of the wi, and modulo ¯κe,w1 = 0.
1041
+ Proof. We wish to apply Lemma 3.5 to the principal Y -bundle
1042
+ (3.2)
1043
+ BDiffθ×Y (Wg) −→ BDiffθ×Y (Wg)//Y.
1044
+ First observe that the fibre inclusion j : Y → BDiffθ×Y (Wg) classifies the Wg-
1045
+ bundle pr1 : Y × Wg → Y equipped with the product θ-structure and the map
1046
+ ℓ = pr1 : Y × Wg → Y . Thus for any w ∈ W we have
1047
+ j∗κe,w = χw ∈ Hn+1(Y ; Q),
1048
+ and so (3.2) satisfies the Leray–Hirsch property.
1049
+ Lemma 3.5 then describes H∗(BDiffθ×Y (Wg)//Y ; Q) as the equaliser of
1050
(3.3)    H∗(BDiffθ×Y (Wg); Q) ⇉ H∗(Y ; Q) ⊗ H∗(BDiffθ×Y (Wg); Q),
the parallel maps being a∗ and 1 ⊗ Id.
1055
+ In a stable range H∗(BDiffθ×Y (Wg); Q) is described in terms of the classes κc,w1···wr,
1056
+ so to make use of this equaliser description we must determine how these classes
1057
+ pull back along the action map
1058
+ a : Y × BDiffθ×Y (Wg) −→ BDiffθ×Y (Wg).
1059
+ This map classifies the Wg-bundle Y × πY : Y × Eθ×Y → Y × BDiffθ×Y (Wg)
1060
equipped with the structure map Y × Eθ×Y --Y×ℓ--> Y × Y --·--> Y .
1065
+ As the wi ∈
1066
+ W = Hn+1(Y ; Q) are primitive with respect to the coproduct induced by the
1067
+ multiplication on Y , we have
1068
    a∗(κc,w1···wr) = (Y × πY )!( (1 × c(TπY Eθ×Y )) · Π_{i=1}^{r} (wi × 1 + 1 × ℓ∗(wi)) )
                   = Σ_{I⊔J={1,2,...,r}} wI × κc,wJ.

Our goal now is to show that the classes defined by
    ¯κc,w1···wr := Σ_{I⊔J={1,2,...,r}} κc,wI · Π_{j∈J} (−(1/χ) κe,wj) ∈ H∗(BDiffθ×Y (Wg); Q)
are equalised by the maps (3.3), so by Lemma 3.5 descend to unique classes of the
same name in H∗(BDiffθ×Y (Wg)//Y ; Q). To see this, we calculate using the formula
above that
    a∗(¯κc,w1···wr) = Σ_{I⊔J={1,2,...,r}} ( Σ_{S⊔T=I} wS × κc,wT ) · (−1)^{|J|} Π_{j∈J} (wj × 1 + (1/χ) 1 × κe,wj)
                    = Σ_{S⊔T⊔U⊔V={1,2,...,r}} (−1)^{|U|} wS⊔U × ( κc,wT · Π_{v∈V} (−(1/χ) κe,wv) ).
For each A ⊆ {1, 2, . . ., r} the coefficient of wA is
    ( Σ_{U⊆A} (−1)^{|U|} ) · ( Σ_{T⊔V={1,...,r}\A} κc,wT · Π_{v∈V} (−(1/χ) κe,wv) )
and Σ_{U⊆A} (−1)^{|U|} vanishes if A ≠ ∅, and is 1 if A = ∅ (it is the binomial expansion
of (1 − 1)^{|A|}), which shows that a∗(¯κc,w1···wr) = 1 × ¯κc,w1···wr as required.
1135
+ of (1 − 1)|A|), which shows that a∗(¯κc,w1···wr) = 1 × ¯κc,w1···wr as required.
1136
+ Finally, that these classes (except ¯κe,w1 = 0) freely generate the Q-algebra
1137
+ H∗(BDiffθ×Y (Wg)//Y ; Q) in a stable range follows from the fact that the κc,w1···wr
1138
+ freely generate H∗(BDiffθ×Y (Wg); Q) in a stable range, together with the observa-
1139
+ tion that ¯κc,w1···wr ≡ κc,w1···wr modulo the ideal generated by classes κe,w and the
1140
+ Leray–Hirsch property again.
1141
+
1142
+ Let us provide a “fibre-integral” interpretation of the classes we have just con-
1143
+ structed. Consider the map of principal Y -bundles
1144
+ Y
1145
+ Eθ×Y
1146
+ Eθ×Y //Y
1147
+ Y
1148
+ BDiffθ×Y (Wg)
1149
+ BDiffθ×Y (Wg)//Y.
1150
+ i
1151
+ πY
1152
+ πY //Y
1153
+ j
1154
+ The composition ℓ ◦ i : Y → Y is the identity, so i∗ℓ∗(w) = w ∈ Hn+1(Y ; Q). We
1155
+ showed in the proof above that j∗κe,w = χw ∈ Hn+1(Y ; Q), so in particular both
1156
+ these principal Y -bundles satisfy the Leray–Hirsch property. Together these give
1157
+ that
1158
+ i∗(ℓ∗(w) − 1
1159
+ χ(πY )∗κe,w) = 0.
1160
+ As Y is n-connected it follows from the Serre spectral sequence that there exists a
1161
+ unique class ¯ℓ∗(w) ∈ Hn+1(Eθ×Y //Y ; Q) which pulls back to ℓ∗(w) − 1
1162
+ χ(πY )∗κe,w.
1163
+ Lemma 3.7. We have
1164
+ ¯κc,w1···wr = (πY //Y )!(c · ¯ℓ∗(w1) · · · ¯ℓ∗(wr)) ∈ H∗(BDiffθ×Y (Wg)//Y ; Q).
1165
+ Proof. As the lower of the above principal Y -bundles satisfies the Leray–Hirsch
1166
+ property, this identity may be verified after pulling back to BDiffθ×Y (Wg).
1167
+ In
1168
+ H∗(Eθ×Y ; Q) we have ¯ℓ∗(w) = ℓ∗(w) − 1
1169
+ χ(πY )∗κe,w, so expanding out gives
1170
+ (πY )!(c · ¯ℓ∗(w1) · · · ¯ℓ∗(wr)) = (πY )!(c ·
1171
+ r
1172
+
1173
+ i=1
1174
+ (ℓ∗(wi) − 1
1175
+ χ(πY )∗κe,wi))
1176
+ =
1177
+
1178
+ I⊔J={1,2,...,r}
1179
+ κc,wI ·
1180
+
1181
+ j∈J
1182
+ (− 1
1183
+ χκe,wj)
1184
+
1185
+ 18
1186
+ OSCAR RANDAL-WILLIAMS
1187
+ as required.
1188
+
1189
+ The classes ¯κc,w1···wr provide an isomorphism
1190
+ Sym∗
1191
+ �[H∗(MTθ; Q) ⊗ Sym∗(W[n + 1])]>0
1192
+ u−2n · e ⊗ W[n + 1]
1193
+
1194
+ −→ H∗(BDiffθ×Y (Wg)//Y ; Q)
1195
+ in a stable range, natural in W, which with the discussion above gives an identifi-
1196
+ cation of graded vector spaces
1197
+ H∗(BDiffθ(Wg); Λ∗(H ⊗ W[1])) ∼= Sym∗
1198
+ �[H∗(MTθ; Q) ⊗ Sym∗(W[n + 1])]>0
1199
+ u−2n · e ⊗ W[n + 1]
1200
+
1201
+ natural in W.
1202
+ Just as in the proof of [KRW20b, Theorem 3.15], and using its notation, this
1203
+ implies that there is a natural transformation
1204
+ (3.4)
1205
+ Pbis(−, V)≥0 ⊗ det⊗n −→ H∗(BDiffθ(Wg); H⊗−)
1206
+ of lax symmetric monoidal functors FB → Gr(Q-mod) which is an isomorphism in a
1207
+ stable range, where P(−, V)≥0 → Pbis(−, V)≥0 is the quotient by those partitions
1208
+ containing a part of size 1 labelled by e ∈ V2n. Assigning to a labelled part the
1209
+ corolla with that label gives a natural transformation
1210
+ (3.5)
1211
+ Pbis(−, V)≥0 ⊗ det⊗n −→ Graphθ(−)g,
1212
+ of lax symmetric monoidal functors FB → Gr(Q-mod), and we claim that using this
1213
+ (3.4) factors through the map ¯κ : Graphθ(−)g → H∗(BDiffθ(Wg); H⊗−). Assuming
1214
+ this claim for now, observe that using the contraction relations in Definition 3.1 (iv)
1215
+ (d′′′) to contract all edges shows that (3.5) is surjective, which with the fact that
1216
+ (3.4) is an isomorphism in a stable range will show that the map ¯κ is an isomorphism
1217
+ in a stable range too (as well as the map (3.5)).
1218
+ It remains to show the factorisation, i.e. that the map (3.4) sends a part of size
1219
+ a labelled by c ∈ V to the class κ¯εac. We again proceed as in the relevant step of
1220
+ the proof of [KRW20b, Theorem 3.15]. There is a fibration sequence
1221
+ map(Wg, Y ) −→ Eθ×Y −→ Eθ
1222
+ and so, taking homotopy orbits for the fibrewise Y -action, a fibration sequence
1223
+ map(Wg, Y )//Y −→ Eθ×Y //Y −→ Eθ.
1224
+ Again by functoriality in W the associated Serre spectral sequence collapses to
1225
+ identify the weight decomposition as
1226
+ H∗(Eθ; Λk(H ⊗ W)) ∼= H∗+k(Eθ×Y //Y ; Q)(k).
1227
+ Given the description in Lemma 3.7 we must show that the map
1228
+ ¯ℓ(−) : W −→ Hn+1(Eθ×Y //Y ; Q)(1) ∼= Hn(Eθ; H) ⊗ W
1229
+ is given by w �→ ¯ε ⊗ w, which is the analogue of [KRW20b, Claim 3.16]. As it is
1230
+ natural in the vector space W it must certainly be given by ¯ℓ(w) = x ⊗ w for some
1231
+ x ∈ Hn(Eθ; H), and we must show that x = ¯ε. That the restriction of x to the
1232
+ fibre Wg of π : Eθ → BDiffθ(Wg) is given by coevaluation may be done precisely
1233
+ as in [KRW20b, Claim 3.16]. By the characterisation of ¯ε it remains to check that
1234
    (1/χ)(πY )!(e · ¯ℓ∗(w)) = 0 ∈ Hn+1(BDiffθ×Y (Wg)//Y ; Q).
1236
+ By the Leray–Hirsch property this may be checked after pulling back to BDiffθ×Y (Wg),
1237
but as ¯ℓ∗(w) = ℓ∗(w) − (1/χ)(πY )∗κe,w ∈ Hn+1(Eθ×Y ; Q) by definition, the vanishing
1239
+ is immediate.
1240
+
1241
1243
+ 3.4. Comparisons. There are natural maps
1244
    BDiff(Wg, D2n) --a--> BDiffθ(Wg, ∗) --b--> BDiffθ(Wg)
          ‖                    |c                  |d
    BDiff(Wg, D2n) --e--> BDiff+(Wg, ∗) --f--> BDiff+(Wg)
1256
+ which each induce maps on H∗(−; H⊗S). There are corresponding maps of spaces
1257
+ of graphs
1258
    Graph1(−)g <--a∗-- Graphθ∗(−)g <--b∗-- Graphθ(−)g
          ‖                 ↑c∗                ↑d∗
    Graph1(−)g <--e∗-- Graph∗(−)g <--f∗-- Graph(−)g
1271
+ given as follows. The maps c∗ and d∗ are induced by the projections W → V. The
1272
+ maps a∗ and e∗ are induced by applying the augmentations V → Q and W → Q
1273
+ to the second tensor factor. The maps b∗ and f ∗ are more subtle, as they involve
1274
+ converting between blue graphs and red graphs, via the formula of Proposition 2.7.
1275
+ Graphically it is given by
1276
[Graphical formula: a blue corolla is sent to the corresponding red corolla, minus (1/χ)
times the sum over its legs of the red graph in which that leg is split off onto a separate
1-valent vertex labelled e, with certain orderings.]
1294
+ The maps b and f are also oriented Wg-bundles, so they also induce fibre-
1295
+ integration maps b! and f! on cohomology. These are b∗- and f ∗-linear respectively,
1296
+ so are determined by the maps (of degree −2n)
1297
+ b! : V −→ Graphθ(−)g
1298
+ f! : W −→ Graph(−)g
1299
+ which each send a monomial c in pi’s and e to the graph given by a single vertex
1300
+ labelled by c.
1301
+ 4. Cohomology of Torelli groups
1302
+ The isomorphisms provided by Theorem 3.4 can be converted into information
1303
+ about the spaces
1304
+ BTor(Wg, D2n), BTorθ(Wg, ∗), BTor+(Wg, ∗), BTorθ(Wg), BTor+(Wg)
1305
+ just as [KRW20a, Theorem 4.1] is deduced from [KRW20a, Theorem 3.15]. Let us
1306
+ give the definition of these spaces and formulate the result: the following is largely
1307
+ a reminder of some points from [KRW20a], and we do not spell out all details again.
1308
+ The group Diff+(Wg) acts on Hn(Wg; Z) preserving the nondegenerate (−1)n-
1309
+ symmetric intersection form λ : Hn(Wg; Z) ⊗ Hn(Wg; Z) → Z. This provides a
1310
+ homomorphism
1311
    αg : Diff+(Wg) −→ Gg := Sp2g(Z) if n is odd, and Og,g(Z) if n is even.
1317
+ This map is not always surjective, but its image is a certain finite-index subgroup
1318
G′_g ≤ Gg, an arithmetic group associated to the algebraic group Sp2g or Og,g. This
subgroup has been determined by Kreck [Kre79]: it is the whole of Gg if n is even
or n = 1, 3, 7, and otherwise is the subgroup Sp^q_{2g}(Z) ≤ Sp2g(Z) of those matrices
1323
+ which preserve the standard quadratic refinement (of Arf invariant 0).
1324
+
1325
1327
+ We define Tor+(Wg) to be the kernel of this homomorphism, and Tor+(Wg, ∗)
1328
+ and Tor(Wg, D2n) to be the kernel of its restriction to the subgroups Diff+(Wg, ∗)
1329
+ and Diff(Wg, D2n) respectively (these restrictions still have image G′
1330
+ g). Further-
1331
+ more, we define
1332
+ BTorθ(Wg) := Bun+(T Wg, θ∗γ2n)//Tor+(Wg)
1333
+ BTorθ(Wg, ∗) := Bun+(T Wg, θ∗γ2n)//Tor+(Wg, ∗),
1334
+ where Bun+(T Wg, θ∗γ2n) ⊂ Bun(T Wg, θ∗γ2n) consists of the orientation-preserving
1335
+ bundle maps (for some choice of orientation of θ∗γ2n that we make once and for
1336
+ all). By the discussion at the beginning of Section 3 the spaces Bun+(T Wg, θ∗γ2n)
1337
+ are path-connected, so each of the BTor’s we have defined are principal G′
1338
+ g-bundles
1339
+ over the corresponding BDiff’s. In particular, their rational cohomologies are both
1340
+ Q-algebras and G′
1341
+ g-representations, and we will describe them as such in a stable
1342
+ range. Before doing so, we recall that the work of Borel identifies
1343
+ H∗(G′
1344
+ g; Q) =
1345
+
1346
+ Q[σ2, σ6, σ10, . . .]
1347
+ if n is odd,
1348
+ Q[σ4, σ8, σ12, . . .]
1349
+ if n is even.
1350
+ in a stable range of degrees, where σ4i−2n may be chosen so that it pulls back to the
1351
+ Miller–Morita–Mumford class κLi ∈ H4i−2n(BDiff+(Wg; Q) associated to the ith
1352
+ Hirzebruch L-class. In particular the κLi vanish in the cohomology of BTor+(Wg).
1353
+ Let us write H(g) := Hn(Wg; Q), which is the standard representation of G′
1354
+ g.
1355
+ Pulled back from BDiff+(Wg) to BTor+(Wg) the coefficient system H is canonically
1356
+ trivialised, but has an action of G′
1357
+ g: it can be identified with the dual H(g)∨. The
1358
+ edge homomorphism of the Serre spectral sequence
1359
(4.1)    H∗(BDiff+(Wg); H⊗S) −→ ( H∗(BTor+(Wg); Q) ⊗ (H(g)∨)⊗S )^{G′_g}
allows us to consider the modified twisted Miller–Morita–Mumford classes ¯κεSc as
providing G′_g-equivariant homomorphisms
    ¯κc : H(g)⊗S −→ H^{n(|S|−2)+|c|}(BTor+(Wg); Q).
1368
+ The identities from the modified contraction formula correspond to identities
1369
+ among these maps: this will give relations analogous to [KRW20b, Section 5.2],
1370
+ which we will spell out after the proof of Theorem 4.1 below. First we explain how
1371
+ these relations can be organised in a categorical way, as follows.
1372
+ Considering (4.1) as a natural transformation of functors on (s)Br2g, we may
1373
+ precompose it with the map
1374
+ ¯κ : Graphg(−) −→ H∗(BDiff+(Wg); H⊗−)
1375
+ (which is an isomorphism in a stable range for n ̸= 2 by Theorem 3.4). This gives
1376
+ G′
1377
+ g-equivariant maps H(g)⊗S ⊗ Graphg(S) → H∗(BTor+(Wg); Q) which assemble
1378
+ to a map
1379
+ K∨ ⊗(s)Br2g Graphg(−) −→ H∗(BTor+(Wg); Q)
1380
+ out of the coend, where K : (s)Br2g → Rep(G′
1381
+ g) sends S to H(g)⊗S. The domain
1382
+ obtains a graded-commutative Q-algebra structure coming from the lax symmetric
1383
+ monoidality of Graphg(−) and strong symmetric monoidality of K(−). Theorem
1384
+ 4.1 below will say that this is surjective in a stable range, with kernel the ideal
1385
+ generated by the κLi, but before stating it we explain a simplification.
1386
+ Let us write i : d(s)Br → (s)Br2g for the inclusion of the downward (signed)
1387
Brauer category. This subcategory is independent of g, as no circles can be created
1388
+ by composing morphisms in the downward Brauer category. Write Graph1(−)′ ⊂
1389
+ i∗Graph1(−)g for the subfunctor where we forbid bivalent vertices labelled by 1 ∈ V
1390
+ both of whose half-edges are legs; similarly, this functor is independent of g. Like
1391
+
1392
1394
+ just after [KRW20b, Proposition 3.11], Graph1(−)g is then the left Kan extension
1395
+ i∗Graph1(−)′ of Graph1(−)′ along i. We similarly define Graphθ
1396
+ ∗(−)′, Graph∗(−)′,
1397
+ Graphθ(−)′, and Graph(−)′, whose left Kan extensions again recover the original
1398
+ functors. The following is the analogue of [KRW20b, Theorem 4.1].
1399
Theorem 4.1. There are G′_g-equivariant ring homomorphisms
(i)    i∗(K∨) ⊗_{d(s)Br} Graph1(−)′ / (κLi | 4i − 2n > 0) −→ H∗(BTor(Wg, D2n); Q)
(ii)   i∗(K∨) ⊗_{d(s)Br} Graphθ∗(−)′ / (κLi | 4i − 2n > 0) −→ H∗(BTorθ(Wg, ∗); Q)
(iii)  i∗(K∨) ⊗_{d(s)Br} Graph∗(−)′ / (κLi | 4i − 2n > 0) −→ H∗(BTor+(Wg, ∗); Q)
(iv)   i∗(K∨) ⊗_{d(s)Br} Graphθ(−)′ / (κLi | 4i − 2n > 0) −→ H∗(BTorθ(Wg); Q)
(v)    i∗(K∨) ⊗_{d(s)Br} Graph(−)′ / (κLi | 4i − 2n > 0) −→ H∗(BTor+(Wg); Q)
1422
+ which for 2n ≥ 6 are isomorphisms in a stable range of degrees.
1423
+ If 2n = 2 then, in a stable range of degrees and assuming that the target is
1424
+ finite-dimensional in degrees ∗ < N for all large enough g, these maps are iso-
1425
+ morphisms onto the maximal algebraic subrepresentations in degrees ∗ ≤ N, and
1426
+ monomorphisms in degrees ∗ ≤ N + 1.
1427
+ Proof. By the main theorem of [KRW20a], as long as 2n ≥ 6 the G′
1428
+ g-representations
1429
+ Hi(BTor(Wg, D2n); Q) are algebraic. Using the inheritance properties for algebraic
1430
+ representations from [KRW20a, Theorem 2.2], the Serre spectral sequences for the
1431
+ homotopy fibre sequences
1432
+ BTor(Wg, D2n) −→BTor+(Wg, ∗) −→ BSO(2n)
1433
+ BTor(Wg, D2n) −→BTorθ(Wg, ∗) −→ BSO(2n)⟨n⟩
1434
+ show that the cohomology groups of BTor+(Wg, ∗) and BTorθ(Wg, ∗) are also al-
1435
+ gebraic G′
1436
+ g-representations, and the same for the homotopy fibre sequences
1437
+ Wg −→BTor+(Wg, ∗) −→ BTor+(Wg)
1438
+ Wg −→BTorθ(Wg, ∗) −→ BTorθ(Wg)
1439
+ show that the cohomology groups of BTor+(Wg) and BTorθ(Wg) are algebraic
1440
+ G′
1441
+ g-representations too.
1442
+ Using this algebraicity property, case (i) is precisely [KRW20b, Theorem 4.1],
1443
+ using that by [KRW20b, Proof of Theorem 5.1] Graph1(−)g is isomorphic to the
1444
+ functor P(−, V)≥0⊗det⊗n. The other cases follow in the same way, using [KRW20b,
1445
+ Proposition 2.16], from Theorem 3.4, with one elaboration which we describe below.
1446
+ The addendum in the case 2n = 2 is precisely as in [KRW20b, Theorem 4.1].
1447
+ The elaboration comes when verifying the first hypothesis of [KRW20b, Lemma
1448
+ 4.3], which in case (v) for example requires us to know that H∗(BDiff+(Wg); H⊗S)
1449
+ is a free H∗(G′
1450
+ g; Q)-module in a stable range. But by transfer H∗(BDiff+(Wg); H⊗S)
1451
+ is a summand of H∗(BDiff+(Wg, ∗); H⊗S) (as H∗(G′
1452
+ g; Q)-modules), and similarly
1453
+ with θ-structures, so cases (ii) and (iii) imply cases (iv) and (v).
1454
On the other
1455
+ hand in case (iii) for example we have discussed in the proof of Theorem 3.4 the
1456
+ degeneration of the Serre spectral sequence in a stable range, giving
1457
+ gr(H∗(BDiff+(Wg, ∗); H⊗S)) ∼= H∗(BSO(2n); Q) ⊗ H∗(BDiff(Wg, D2n); H⊗S).
1458
+
1459
1461
+ The Serre filtration is one of H∗(G′
1462
+ g; Q)-modules, so as the associated graded is a
1463
+ free H∗(G′
1464
+ g; Q)-module in a stable range (because H∗(BDiff(Wg, D2n); H⊗S) is the
1465
+ case treated in [KRW20b, Theorem 4.1]), it follows that H∗(BDiff+(Wg, ∗); H⊗S)
1466
+ is too. The same argument applies in case (ii).
1467
+
1468
+ This quite categorical description can be used to get a more down-to-earth pre-
1469
+ sentation for these cohomology rings: in case (v) this is the presentation we have
1470
+ recorded in Theorem A. This is deduced just as in [KRW20b, Section 5], though
1471
+ most of the work has been done as we have already expressed things in terms of
1472
+ graphs. As in [KRW20b, Section 5.4] this is not the smallest possible presentation:
1473
+ it can be simplified by manipulating graphs; we leave the details to the interested
1474
+ reader.
1475
+ 5. The case 2n = 2
+ Although Theorem 4.1 is only known to hold in a limited range of degrees in the
+ case 2n = 2 (N = 2 is currently the best known constant for g ≥ 3, using the work
+ of Johnson [Joh85]), Theorem 3.4 does hold in a range of cohomological degrees
+ tending to infinity with g. In this case our discussion is closely related to the work
+ of Kawazumi and Morita [Mor96, KM96, KM01], and in this section we take
+ the opportunity to revisit that work from our perspective. Throughout this section
+ we assume that g ≥ 2, so that χ(Wg) = 2 − 2g ̸= 0.
+ In terms of Kawazumi and Morita’s notation we have
+ Mg := π0(Diff+(Wg))
+ Mg,∗ := π0(Diff+(Wg, ∗))
+ Mg,1 := π0(Diff+(Wg, D2)).
+ Under our assumption g ≥ 2 the groups Diff+(Wg), Diff+(Wg, ∗), and Diff+(Wg, D2)
+ all have contractible path-components, so the group cohomology of Mg is the
+ cohomology of BDiff+(Wg), and so on. Theorem 3.4 gives a natural transformation
+ ¯κ : Graph(−)g −→ H∗(Mg; H⊗−)
+ of functors sBr2g → Gr(Q-mod), which is an isomorphism in a stable range of
+ degrees. Note that in this case H∗(BSO(2); Q) = Q[e] so V = W = Q[e] and there
+ is no difference between the tangential structure θ and an orientation. In particular
+ if we denote by Γi ∈ Graph(∅) the graph with a single vertex, no edges, and labelled
+ by ei+1, then ¯κ(Γi) = κi ∈ H2i(Mg; Q) is the usual Miller–Morita–Mumford class4.
+ 4 Our κi is denoted ei in the work of Kawazumi and Morita.
+ Our goal in Sections 5.1–5.4 is to analyse Graph(−) in several ways, making
+ contact with the work of Kawazumi and Morita mentioned above as well as work
+ of Garoufalidis and Nakamura [GN98, GN07] and Akazawa [Aka05].
1499
+ 5.1. Reduction to corollas. The possible labels for the vertices of graphs in
+ Graph(S) are powers of the Euler class e. Given any graph we may repeatedly apply
+ the modified contraction formula to write it as a linear combination of graphs with
+ fewer edges, and hence any graph is equivalent to a linear combination of graphs
+ with no edges: these are disjoint unions of corollas. Of these, by definition of Graph:
+ the 0-valent corolla labelled by e is equal to the scalar χ, the 1-valent corolla labelled
+ by 1 ∈ V is trivial, and the 1-valent corolla labelled by e ∈ V is trivial. Define a
+ labelled partition of a finite set S to be a partition {Sα}α∈I of S into (possibly
+ empty) subsets and a label enα for each part, such that
+ (i) If |Sα| = 0 then nα ≥ 2,
+ (ii) If |Sα| = 1 then nα ≥ 1.
+ We give a part (Sα, nα) degree 2nα + |Sα| − 2, and a labelled partition the degree
+ given by the sums of the degrees of its parts. Similarly to the proof of Theorem 3.4
+ (iv) (particularly around equation (3.5)), let Pbis(S, V)≥0 denote the free Q[χ±1]-
+ module with basis the set of labelled partitions of S. Assigning to a labelled part
+ (Sα, enα) the corolla with legs Sα and label enα defines a map
+ (5.1)   Pbis(S, V)≥0 ⊗ det QS −→ Graph(S),
+ natural in S with respect to bijections.
+ Lemma 5.1. The map (5.1) is an isomorphism.
+ Proof. It is surjective, as explained above, by repeatedly applying the modified
+ contraction formula to express a graph in terms of graphs without edges.
+ If it were not injective then it would have some nontrivial Q[χ±1]-linear
+ combination of labelled partitions in its kernel, of a given degree d, and this would
+ remain a nontrivial Q-linear combination of labelled partitions when specialised to
+ χ = 2 − 2g for all g ≫ 0 (as a Laurent polynomial in χ has finitely-many roots).
+ But in the proof of Theorem 3.4 (iv), in the discussion after equation (3.5), it is
+ explained that when specialised to χ = 2 − 2g this map is an isomorphism in a
+ range of degrees tending to infinity with g; for large enough g the degree d will be
+ in this stable range, a contradiction.
+ 
+ In particular, for the graphs Γi described above there is an isomorphism
+ (5.2)   Q[χ±1][Γ1, Γ2, . . .] ∼= Graph(∅).
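+ As a quick illustration of the grading conventions above: the graph Γi corresponds
+ under (5.1) to the labelled partition with a single empty part labelled ei+1, which
+ has degree 2(i + 1) + 0 − 2 = 2i; this is consistent with ¯κ(Γi) = κi lying in
+ H2i(Mg; Q).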
1537
+ 5.2. Reduction to trivalent graphs without labels. In this section we will
+ prove the following.
+ Theorem 5.2. Using the modified contraction formula any marked oriented graph
+ is equivalent to a Q[χ±1, (χ − 2)−1, (χ − 3)−1, (χ − 4)−1]-linear combination of
+ trivalent graphs with all vertices labelled by 1 ∈ V0.
+ Let Graphtri(S) ≤ Graph(S) denote the sub-Q[χ±1]-module spanned by those
+ marked oriented graphs which are trivalent and all of whose labels are 1 ∈ V.
+ Corollary 5.3. The monomorphism i : Graphtri(−) → Graph(−) becomes an
+ isomorphism upon inverting χ − 2, χ − 3, and χ − 4. In particular Graphtri(−)g =
+ Graph(−)g.
+ Remark 5.4 (2-valent vertices labelled by 1). Using the relation
+ λ2,3(κ¯ε1,2κ¯ε3,...,nc) = κ¯ε1,3,...,nc
+ we can always remove 2-valent vertices labelled by 1. It is sometimes convenient
+ when writing formulas for 3-valent graphs to also allow 2-valent vertices labelled
+ by 1: we allow ourselves to do so, noting that the above can always be used to
+ eliminate the 2-valent vertices.
1553
+ Proof of Theorem 5.2. As a matter of notation we will formally manipulate modified
+ twisted Miller–Morita–Mumford classes, but this is equivalent to manipulating
+ marked oriented graphs. Rearranging the first contraction formula gives
+ (5.3)   κ¯εaeb = (χ/(χ−2)) ( λ1,2 κ¯ε2+aeb−1 − (1/χ2) κe2 κ¯εaeb−1 ).
+ Rearranging the second contraction formula gives
+ κ¯εa+b = λa+1,a+2(κ¯εa+1 · κ¯ε1+b) − (1/χ2)(κe2 · κ¯εa · κ¯εb) + (1/χ)(κ¯εae · κ¯εb + κ¯εa · κ¯εbe)
+ and using (5.3) to eliminate the Euler classes from the last two terms gives
+ κ¯εa+b = λa+1,a+2(κ¯εa+1 · κ¯ε1+b) − (1/χ2)(κe2 · κ¯εa · κ¯εb)
+          + (1/(χ−2)) ( (λ1,2(κ¯ε2+a) − (1/χ2) κe2 κ¯εa) · κ¯εb + κ¯εa · (λ1,2(κ¯ε2+b) − (1/χ2) κe2 κ¯εb) )
+        = λa+1,a+2(κ¯εa+1 · κ¯ε1+b) + (1/(χ−2)) ( λ1,2(κ¯ε2+a) · κ¯εb + κ¯εa · λ1,2(κ¯ε2+b) )
+          − (1/(χ(χ−2))) κe2 · κ¯εa · κ¯εb.
+ It suffices to show that each corolla κ¯εaeb may be represented by a linear combination
+ of trivalent graphs. By Example 2.8 the class κe2 may be represented by a
+ trivalent graph (after inverting χ − 3) so by repeatedly applying (5.3) it suffices to
+ show that each κ¯εn can too. By Remark 5.4 we may as well show that classes can
+ be represented by 2- and 3-valent graphs. To get started we have κ¯ε = 0 as it has
+ negative degree.
+ Consider the class λ2,5λ3,4(κ¯ε1,2,3 · κ¯ε4,5,6). Using the form of the relations above,
+ which avoid creating Euler classes, this is
+ λ2,5( κ¯ε1,2,5,6 − (1/(χ−2))(λu,v(κ¯εu,v,1,2)κ¯ε5,6 + κ¯ε1,2λu,v(κ¯εu,v,5,6)) + (1/(χ(χ−2)))(κe2κ¯ε1,2κ¯ε5,6) )
+   = λ2,5(κ¯ε1,2,5,6) − (2/(χ−2))λu,v(κ¯εu,v,1,6) + (1/(χ(χ−2)))κe2κ¯ε1,6
+   = ((χ−4)/(χ−2))λ2,5(κ¯ε1,2,5,6) + (1/(χ(χ−2)))κe2κ¯ε1,6.
+ Renumbering legs and rearranging, this shows that λ1,2(κ¯ε4) may be represented
+ by 2- and 3-valent graphs.
+ Applied with (a, b) = (2, 2) the second relation gives
+ κ¯ε4 = λ3,4(κ¯ε3 · κ¯ε3) + (1/(χ−2))(λ1,2(κ¯ε4) · κ¯ε2 + κ¯ε2 · λ1,2(κ¯ε4)) − (1/(χ(χ−2)))κe2 · κ¯ε2 · κ¯ε2,
+ which with the above shows that κ¯ε4 may be represented by 2- and 3-valent graphs.
+ Similarly to the above, consider λ2,5λ3,4(κ¯ε1,2,3 · κ¯ε4,5,6,7), which is
+ λ2,5( κ¯ε1,2,5,6,7 + (1/(χ(χ−2)))κe2κ¯ε1,2κ¯ε5,6,7 − (1/(χ−2))(λu,v(κ¯εu,v,1,2)κ¯ε5,6,7 + κ¯ε1,2λu,v(κ¯εu,v,5,6,7)) )
+   = λ2,5(κ¯ε1,2,5,6,7) + (1/(χ(χ−2)))κe2κ¯ε1,6,7 − (1/(χ−2))(λ2,5λu,v(κ¯εu,v,1,2κ¯ε5,6,7) + λu,v(κ¯εu,v,1,6,7))
+   = ((χ−3)/(χ−2))λ2,5(κ¯ε1,2,5,6,7) + (1/(χ(χ−2)))κe2κ¯ε1,6,7 − (1/(χ−2))λ2,5λu,v(κ¯εu,v,1,2)κ¯ε5,6,7.
+ Renumbering legs and rearranging, this shows that λ1,2(κ¯ε5) may be represented by
+ 2-, 3-, and 4-valent graphs; with the above it follows that it can also be represented
+ by 2- and 3-valent graphs.
+ Applied with (a, b) = (2, 3) the second relation gives
+ κ¯ε5 = λ3,4(κ¯ε3 · κ¯ε4) + (1/(χ−2))(λ1,2(κ¯ε4) · κ¯ε3 + κ¯ε2 · λ1,2(κ¯ε5)) − (1/(χ(χ−2)))κe2 · κ¯ε2 · κ¯ε3,
+ so it follows that κ¯ε5 may be represented by 2- and 3-valent graphs.
+ If n ≥ 6 then we can write n = a + b with a, b ≥ 3, so a + 2, b + 2 < n and so the
+ second relation expresses κ¯εn in terms of κ¯εm’s with m < n. Thus all κ¯εn’s may be
+ represented by 2- and 3-valent graphs as required.
+ 
+ It is worth observing that we have the relation
+ (5.4)   λ1,2(κ¯ε3) = ((χ−2)/χ) κ¯εe + (1/χ2) κe2 κ¯ε = 0,
+ using that κ¯εe = 0 (by definition) and that κ¯ε = 0 (as it has negative degree). This
+ means that any graph having a trivalent vertex with a loop is trivial in Graph(−).
1661
+
1662
1664
+ 5.3. A remark on orderings. A curious normalisation is possible when consider-
1665
+ ing trivalent graphs, allowing one to neglect the orderings of vertices, of half-edges,
1666
+ and the orientations of edges. In [Mor96, KM96, KM01] this is implemented ab
1667
+ initio and (marked) oriented graphs play no role. Let us explain this normalisation,
1668
+ extended to trivalent graphs with legs.
1669
+ A trivalent graph ˜Γ with legs S consists of a set V of vertices, a set H of half-
1670
+ edges, a 3-to-1 map a : H → V recording to which vertex each half-edge is incident,
1671
+ and an unordered matching µ on H ⊔ S recording which half-edges span an edge,
1672
+ and which half-edges are connected to which legs.
1673
+ Given a trivalent graph ˜Γ = (V, H, a : H → V, µ) with legs S, we may choose an
1674
+ ordering of V and choose an ordering of H such that a : H → V is weakly monotone
1675
+ (equivalently, choose an ordering of the half-edges incident at each vertex). We also
1676
+ choose an ordering of S.
1677
+ There is an induced ordering of H ⊔ S by putting ⃗S after ⃗H, and we form an
+ ordered matching m of H ⊔ S by taking those pairs (a, b) with a < b and {a, b} ∈ µ.
+ Using this we form an oriented trivalent graph Γchoice = (⃗V , ⃗H, a : H → V, m),
+ depending on these choices. The normalisation is as follows. Let
+ x1 < x2 < x3 < x4 < . . . < x2k ∈ H ⊔ S be the total order on H ⊔ S, and let
+ a1 < b1, . . . , ak < bk be the ordered pairs which span an edge, with
+ a1 < a2 < . . . < ak ∈ H ⊔ S. Then there is a bijection given by
+ ρ := ( a1 b1 a2 b2 a3 b3 · · · ak   bk
+        x1 x2 x3 x4 x5 x6 · · · x2k−1 x2k )
+ and we define Γ := sign(ρ) · Γchoice.
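+ Concretely, sign(ρ) is the sign of the permutation sorting the word a1 b1 a2 b2 . . . ak bk
+ into increasing order, so it can be computed by counting inversions. The following
+ minimal Python sketch does this; it is an illustration only, and the function name
+ and input encoding are ours rather than anything fixed in the text:
+ 
+ from itertools import combinations
+ 
+ def matching_sign(pairs):
+     # `pairs` lists the matched pairs (a_i, b_i) with a_i < b_i and a_1 < ... < a_k,
+     # the labels being comparable (e.g. integers giving the total order on H and S).
+     # sign(rho) = (-1)^(number of inversions of the word a_1 b_1 a_2 b_2 ... a_k b_k).
+     word = [x for pair in pairs for x in pair]
+     inversions = sum(1 for i, j in combinations(range(len(word)), 2) if word[i] > word[j])
+     return -1 if inversions % 2 else +1
+ 
+ # For a matching consistent with the permutation rho = (1)(235)(46) of Example 5.5
+ # below, namely the (hypothetical) matching {1,5}, {2,6}, {3,4} on {1,...,6}:
+ assert matching_sign([(1, 5), (2, 6), (3, 4)]) == -1  # rho is odd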
1691
+ Claim. As long as ˜Γ has no vertices with loops, the element Γ does not depend
1692
+ on the choice of ordering of V or H, and depends on the ordering of S precisely as
1693
+ the sign representation.
1694
+ In particular if we set5
1695
+ Graphundec(S) := Q[χ±1][˜Γ trivalent graph with legs S]/(graphs with loops)
1696
+ then the Claim together with the relation (5.4) provides an epimorphism
1697
+ Φ : Graphundec(S) ⊗ det QS −→ Graphtri(S)
1698
+ of Q[χ±1]-modules, natural with respect to bijections in S. This can be extended to
1699
+ a natural transformation of functors on sBrχ by letting an ordered matching (a, b)
1700
+ of elements of S act by adding an edge to the trivalent graph connecting a and b,
1701
+ and contracting the determinant by a ∧ b. Doing so might create a circle with no
1702
+ vertices, which should be replaced by the scalar χ − 2.
1703
+ Proof of Claim. If (h1, h2, h3) are the half-edges incident at a vertex v and we
+ change their ordering to (hσ(1), hσ(2), hσ(3)) giving Γ′choice, then (under the
+ assumption that Γ does not have loops) the relative ordering of half-edges forming an
+ edge has not changed, so m′ = m. Thus Γ′choice = sign(σ) · Γchoice. On the other
+ hand ρ′ is obtained from ρ by postcomposing with σ, and precomposing with a
+ permutation which permutes some (ai < bi)’s, which is an even permutation. Thus
+ sign(ρ′) = sign(σ) · sign(ρ), so Γ′ = Γ.
+ Suppose a vertex v1 has half-edges (h1_1, h1_2, h1_3) and v2 has half-edges (h2_1, h2_2, h2_3),
+ and v1 < v2 ∈ ⃗V are adjacent in the ordering on V , and consider transposing the
+ ordering of these vertices. For edges between a u < v1 and a vi or between a vi and
+ a u > v2 the relative ordering of their half-edges does not change. Edges between
+ v1 and v2 have the relative ordering of their half-edges reversed. Thus if there are
+ N such edges we have Γ′choice = (−1)1+N · Γchoice. But the permutation ρ is changed
+ by permuting (h2_1, h2_2, h2_3) past (h1_1, h1_2, h1_3), which has sign −1, and N transpositions
+ (aibi), which has sign (−1)N. Thus again Γ′ = Γ.
+ Finally, changing the order of S by a permutation τ changes ρ by postcomposition
+ with τ, so acts as sign(τ).
+ 5 In [Mor96, KM96, KM01] they restrict to “trivalent graphs without loops”, however we find
+ it more natural to allow loops but set graphs with a loop to zero.
+ 
1741
+ Example 5.5. For the ordering of vertices and half-edges corresponding to the
+ theta-graph in Example 2.8 the associated permutation is ρ = (1)(235)(46) which
+ is odd, so the undecorated theta-graph yields ((χ−3)/χ) κe2. This is precisely minus the
+ evaluation of βΓ2 on [KM01, p. 39] (unfortunately the theta-graph is denoted Γ2 in
+ that paper). This minus comes from the use of a different sign convention, see the
+ discussion at [KRW20b, top of p. 33].
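+ (As a check of the parity claim: the 3-cycle (235) is an even permutation and the
+ transposition (46) is odd, so sign(ρ) = (+1) · (−1) = −1, i.e. ρ is indeed odd.)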
1748
+ 5.4. Relations among trivalent graphs. The modified contraction formula de-
1749
+ scribes relations among graphs involving contracting an edge, but this necessarily
1750
+ involves graphs with vertices of different valencies. In Theorem 5.2 we have ex-
1751
+ plained that, in the case of surfaces, all graphs may be expressed purely in terms of
1752
+ trivalent graphs: one may ask what relations among trivalent graphs Γ are imposed
1753
+ by the contraction formula.
1754
+ For the unmodified contraction formula discussed in [KRW20b], the answer is
1755
+ that it imposes the “I = H” relation among trivalent graphs: this is because
1756
+ both the I- and H-graphs admit contractions to the X-graph. Furthermore, as all
1757
+ connected trivalent graphs with the same number of legs and of the same genus are
1758
+ equivalent under the “I = H” relation, and the contraction formula never changes
1759
+ the genus or number of legs, there are no further relations.
1760
+ In the setting of the modified contraction formula discussed here it is more
1761
+ complicated. It is best given in the setting of undecorated trivalent graphs.
1762
+ Theorem 5.6. After inverting χ−2, χ−3, and χ−4, undecorated trivalent graphs
+ which differ locally by the relation
+ (IHmod)   [graph] = [graph] + (1/((χ−4)(3−χ))) · ( [graphs] ) + (1/(χ−4)) · ( [graphs] )
+ (the bracketed terms are local graph pictures, not reproduced in this text version;
+ the relation identifies an H-shaped local configuration with an I-shaped one plus
+ correction terms)
+ give the same elements in Graphtri[(χ − 2)−1, (χ − 3)−1, (χ − 4)−1].
1779
+ Proof. We establish this relation in Graphtri({a, b, c, d}) ⊗ det Q{a,b,c,d}, and it then
+ follows in general using functoriality on the signed Brauer category. We order the
+ legs as a < b < c < d.
+ Figure 3. Some marked graphs. [(i) an H-shaped graph with legs a, b, c, d and
+ half-edges labelled 1–6; (ii) an I-shaped graph with legs a, b, c, d and half-edges
+ labelled 1–6. Pictures not reproduced here.]
+ Consider first the H-shaped graph shown in Figure 3 (i), with the depicted names
+ of half-edges, ordered as 3 < 1 < 5 < 6 < 2 < 4. Its corresponding permutation is
+ ( 3 c 1 a 5 6 2 b 4 d
+   3 1 5 6 2 4 a b c d )
+ which is even. Thus this ordering data represents the underlying
+ undecorated H-shaped trivalent graph. Ignoring for now the matchings to the legs
+ (which are given by matching 1 with a, 2 with b, and so on), it corresponds to
+ λ5,6(κ¯ε3,1,5 · κ¯ε6,2,4). Using the form of the relations which avoid creating Euler
+ classes from the proof of Theorem 5.2 we have
+ λ5,6(κ¯ε3,1,5 · κ¯ε6,2,4) = κ¯ε3,1,2,4 + (1/(χ(χ−2)))κe2κ¯ε3,1κ¯ε2,4
+                           − (1/(χ−2))(λu,v(κ¯εu,v,3,1)κ¯ε2,4 + κ¯ε3,1λu,v(κ¯εu,v,2,4)).
+ Consider now the I-shaped graph shown in Figure 3 (ii), with the depicted names
+ of the half-edges, ordered as 4 < 3 < 5 < 6 < 1 < 2. Its corresponding permutation
+ is
+ ( 4 d 3 c 5 6 1 a 2 b
+   4 3 5 6 1 2 a b c d )
+ which is odd. Thus this ordering data represents minus the
+ underlying undecorated I-shaped trivalent graph. Ignoring again the matchings to
+ the legs, it corresponds to
+ λ5,6(κ¯ε4,3,5 · κ¯ε6,1,2) = κ¯ε4,3,1,2 + (1/(χ(χ−2)))κe2κ¯ε4,3κ¯ε1,2
+                           − (1/(χ−2))(λu,v(κ¯εu,v,4,3)κ¯ε1,2 + κ¯ε4,3λu,v(κ¯εu,v,1,2)).
+ The sum of these two expressions therefore represents the image under Φ of
+ the difference H − I of the underlying undecorated trivalent graphs. Furthermore,
+ κ¯ε4,3,1,2 = −κ¯ε3,1,2,4 so these terms cancel.
+ From the proof of Theorem 5.2 we have the identity
+ λu,v(κ¯εu,v,s,t) = ((χ−2)/(χ−4))λi,jλk,l(κ¯εs,i,k · κ¯εl,j,t) − (1/(χ(χ−4)))κe2κ¯εs,t,
+ expressing terms of the form λu,v(κ¯εu,v,s,t) in terms of (2- and) 3-valent vertices.
+ Applying it to the sum of the two expressions above, and collecting terms, therefore
+ gives
+ Φ(H − I) = (1/(χ(χ−4)))κe2( κ¯ε3,1κ¯ε2,4 + κ¯ε4,3κ¯ε2,1 )
+            − (1/(χ−4)) ( λi,jλk,l(κ¯ε3,i,k · κ¯εl,j,1)κ¯ε2,4 + κ¯ε3,1λi,jλk,l(κ¯ε2,i,k · κ¯εl,j,4)
+                          + λi,jλk,l(κ¯ε4,i,k · κ¯εl,j,3)κ¯ε1,2 + κ¯ε4,3λi,jλk,l(κ¯ε1,i,k · κ¯εl,j,2) ).
+ Using that κe2 = Φ((χ/(χ−3))Θ) and carefully putting the graphs corresponding to the
+ other terms into the normal form of Section 5.3 gives the identity in the statement
+ of the theorem.
+ 
1865
+ Our relation IHmod is graphically identical to the relation called IHbis_0 by
+ Akazawa [Aka05, p. 100] and in the corrigendum [GN07] to the paper of Garoufalidis
+ and Nakamura [GN98]. In those papers it is emphasised that IHbis_0 means
+ this identity is imposed only when the 4 half-edges belong to distinct edges, but
+ in fact this is redundant: if the 4 half-edges do not belong to distinct edges, then
+ the identity already holds in Graphundec. So in fact imposing our relation IHmod
+ is identical to imposing their relation IHbis_0.
+ Theorem 5.7. Upon inverting χ − 2, χ − 3, and χ − 4, the maps
+ Graphundec(S)/(IHmod) ⊗ det QS −Φ→ Graphtri(S) −inc→ Graph(S)
+ are isomorphisms.
1886
+ Proof. Let R := Q[χ±1, (χ − 2)−1, (χ − 3)−1, (χ − 4)−1] and implicitly base change
+ to this ring. We have already shown in Corollary 5.3 that the second map is an
+ isomorphism, and Φ is certainly an epimorphism, so it remains to show that the
+ composition is a monomorphism.
+ For an undecorated trivalent graph Γ, define a double edge to be an unordered
+ pair of vertices which share precisely two edges, and a triple edge to be an unordered
+ pair of vertices which share precisely three edges, i.e. form a theta-graph. Define
+ µ(Γ) := 2 · #{double edges of Γ} + 3 · #{triple edges of Γ},
+ filter Graphundec by letting F^k Graphundec be spanned by those Γ with µ(Γ) ≥ k,
+ and give Graphundec/(IHmod) the induced filtration.
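+ (For orientation: the theta-graph has µ = 3, since its two vertices form a single
+ triple edge, while any trivalent graph in which no two vertices share more than one
+ edge has µ = 0.)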
1899
+ If Γ = ΓH is a graph with µ(Γ) = k and a distinguished “H” subgraph, and ΓI
+ is obtained by replacing this “H”-subgraph by “I”, then by applying the relation
+ IHmod to this subgraph we find that
+ (i) if the edge involved is not part of a double or triple edge then the relation
+ gives ΓH − ΓI ∈ F^{k+1} Graphundec/(IHmod),
+ (ii) if the edge involved is part of a double or triple edge then the relation is trivial
+ (i.e. already holds in Graphundec).
+ Thus the associated graded of the induced filtration on Graphundec/(IHmod) can
+ be described as Graphundec/(IH0), where as in [GN98] the relation IH0 means
+ imposing the “I = H” relation when the four half-edges belong to different edges.
+ Now IH0 is an equivalence relation on the set of isomorphism classes of trivalent
+ graphs without loops, and similarly to [GN98, Proof of Proposition 2.3 (c)] it is
+ easy to see that all connected trivalent graphs without loops of the same rank and
+ with the same legs are equivalent to each other: in other words the equivalence
+ classes of such are given by partitions of S (the parts are the legs of each connected
+ component) labelled by a power of e (recording the rank of the graph). It follows
+ that the rank of Graphundec/(IHmod) in each degree, as an R-module, is at most that
+ of Graph(∅) as determined in Lemma 5.1, and so the composition in the statement
+ of the theorem, which is an epimorphism, must be an isomorphism.
+ 
1919
+ 5.5. On the work of Garoufalidis and Nakamura. The discussion of the last
+ few sections can be used to complete the work of Garoufalidis and Nakamura [GN98,
+ GN07], concerning the calculation of the invariants [Λ∗V13/(V22)]Sp in a stable
+ range. Here we write Vλ for the irreducible Sp-representation corresponding to the
+ partition λ, which was written as [λ]sp in those papers, and V22 denotes the unique
+ copy of this irreducible in Λ2V13. Combining Theorem 1.1 and Proposition 2.3
+ (c) of [GN98] was supposed to calculate [Λ∗V13/(V22)]Sp in a stable range, but for
+ the corrected version of Theorem 1.1 in [GN07], which expresses these invariants as
+ Graphundec(∅)g/(IHbis_0), the authors say “it turns out that a simple stable structure
+ of [these invariants] as in Proposition 2.3 (c) will not be easy to detect”. However
+ Theorem 5.7 and equation (5.2) give that
+ [Λ∗V13/(V22)]Sp ∼= Graphundec(∅)g/(IHbis_0) ∼= Graph(∅)g ∼= Q[Γ1, Γ2, . . .]
+ in a stable range. Thus in fact Proposition 2.3 (c) of [GN98] is correct as stated.
+ Remark 5.8. This can also be obtained from the work of Felder, Naef, and Willwacher
+ [FNW21]. Specifically, the graded-commutative algebra A(g) defined just before
+ Theorem 6 of that paper is Λ∗V13/(V22), and Theorem 6 together with Proposition
+ 36 (3) also gives the above.
1939
+ References
+ [Aka05]   H. Akazawa, Symplectic invariants arising from a Grassmann quotient and trivalent
+           graphs, Math. J. Okayama Univ. 47 (2005), 99–117.
+ [Bol12]   S. K. Boldsen, Improved homological stability for the mapping class group with integral
+           or twisted coefficients, Math. Z. 270 (2012), no. 1-2, 297–329.
+ [FNW21]   M. Felder, F. Naef, and T. Willwacher, Stable cohomology of graph complexes,
+           https://arxiv.org/abs/2106.12826, 2021.
+ [GN98]    S. Garoufalidis and H. Nakamura, Some IHX-type relations on trivalent graphs and
+           symplectic representation theory, Math. Res. Lett. 5 (1998), no. 3, 391–402.
+ [GN07]    S. Garoufalidis and H. Nakamura, Corrigendum: “Some IHX-type relations on trivalent
+           graphs and symplectic representation theory” [Math. Res. Lett. 5 (1998), no. 3,
+           391–402], Math. Res. Lett. 14 (2007), no. 4, 689–690.
+ [GRW14]   S. Galatius and O. Randal-Williams, Stable moduli spaces of high-dimensional
+           manifolds, Acta Math. 212 (2014), no. 2, 257–377.
+ [GRW17]   S. Galatius and O. Randal-Williams, Homological stability for moduli spaces of high
+           dimensional manifolds. II, Ann. of Math. (2) 186 (2017), no. 1, 127–204.
+ [GRW18]   S. Galatius and O. Randal-Williams, Homological stability for moduli spaces of high
+           dimensional manifolds. I, J. Amer. Math. Soc. 31 (2018), no. 1, 215–264.
+ [GRW19]   S. Galatius and O. Randal-Williams, Moduli spaces of manifolds: a user’s guide,
+           Handbook of homotopy theory, Chapman & Hall/CRC, CRC Press, Boca Raton, FL,
+           2019, pp. 445–487.
+ [GTMW09]  S. Galatius, U. Tillmann, I. Madsen, and M. Weiss, The homotopy type of the
+           cobordism category, Acta Math. 202 (2009), no. 2, 195–239.
+ [Hai20]   R. Hain, Johnson homomorphisms, EMS Surv. Math. Sci. 7 (2020), no. 1, 33–116.
+ [Joh85]   D. Johnson, The structure of the Torelli group. III. The abelianization of T, Topology
+           24 (1985), no. 2, 127–144.
+ [KM96]    N. Kawazumi and S. Morita, The primary approximation to the cohomology of the
+           moduli space of curves and cocycles for the stable characteristic classes, Math. Res.
+           Lett. 3 (1996), no. 5, 629–641.
+ [KM01]    N. Kawazumi and S. Morita, The primary approximation to the cohomology of the
+           moduli space of curves and cocycles for the Mumford-Morita-Miller classes,
+           www.ms.u-tokyo.ac.jp/preprint/pdf/2001-13.pdf, 2001.
+ [Kre79]   M. Kreck, Isotopy classes of diffeomorphisms of (k − 1)-connected almost-parallelizable
+           2k-manifolds, Algebraic topology, Aarhus 1978 (Proc. Sympos., Univ. Aarhus, Aarhus,
+           1978), Lecture Notes in Math., vol. 763, Springer, Berlin, 1979, pp. 643–663.
+ [KRW20a]  A. Kupers and O. Randal-Williams, The cohomology of Torelli groups is algebraic,
+           Forum of Mathematics, Sigma 8 (2020), e64.
+ [KRW20b]  A. Kupers and O. Randal-Williams, On the cohomology of Torelli groups, Forum of
+           Mathematics, Pi 8 (2020), e7.
+ [KRW21]   A. Kupers and O. Randal-Williams, On the Torelli Lie algebra,
+           https://arxiv.org/abs/2106.16010, 2021.
+ [Mor96]   S. Morita, A linear representation of the mapping class group of orientable surfaces
+           and characteristic classes of surface bundles, Topology and Teichmüller spaces
+           (Katinkulta, 1995), World Sci. Publ., River Edge, NJ, 1996, pp. 159–186.
+ [Qui71]   D. Quillen, The spectrum of an equivariant cohomology ring. I, Ann. of Math. (2) 94
+           (1971), 549–572.
+ [RW16]    O. Randal-Williams, Resolutions of moduli spaces and homological stability, J. Eur.
+           Math. Soc. (JEMS) 18 (2016), no. 1, 1–81.
+ Email address: o.randal-williams@dpmms.cam.ac.uk
+ Centre for Mathematical Sciences, Wilberforce Road, Cambridge CB3 0WB, UK
2037
+
OtAzT4oBgHgl3EQfIftM/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
PNFJT4oBgHgl3EQfIiwq/content/tmp_files/2301.11456v1.pdf.txt ADDED
The diff for this file is too large to render. See raw diff
 
PNFJT4oBgHgl3EQfIiwq/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
QNFPT4oBgHgl3EQfojUh/content/tmp_files/2301.13134v1.pdf.txt ADDED
The diff for this file is too large to render. See raw diff
 
QNFPT4oBgHgl3EQfojUh/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
TdE4T4oBgHgl3EQfLwwK/content/tmp_files/2301.04940v1.pdf.txt ADDED
@@ -0,0 +1,262 @@
 
 
 
 
1
+ arXiv:2301.04940v1 [math.AG] 12 Jan 2023
2
+ FAILURE OF LEFSCHETZ HYPERPLANE THEOREM
3
+ ANANYO DAN
4
+ Abstract. In this article, we give a counterexample to the Lefschetz hyperplane theorem
5
+ for non-singular quasi-projective varieties. A classical result of Hamm-Lˆe shows that Lefschetz
6
+ hyperplane theorem can hold for hyperplanes in general position. We observe that the condition
7
+ of “hyperplane” is strict in the sense that it is not possible to replace it by higher degree
8
+ hypersurfaces. The counterexample is very simple: projective space minus finitely many points.
9
+ Moreover, as an intermediate step we prove that the Grothendieck-Lefschetz theorem also fails
10
+ in the quasi-projective case.
11
+ 1. Introduction
12
+ The underlying field will always be C.
13
+ Consider a non-singular, projective variety Y of
14
+ dimension n. The Lefschetz hyperplane theorem (LHT) states that for any hypersurface X ⊂ Y
15
+ with OX(Y ) very ample, the restriction morphism
16
+ Hk(Y, Z) → Hk(X, Z) is an isomorphism for all k < n − 1 and injective for k = n − 1.
17
+ (1.1)
18
+ If Y is the projective space, then the theorem extends further. In particular, the restriction from
19
+ Hn−1(Pn) to Hn−1(X) is an isomorphism for a very general hypersurface X. The geometry of
20
+ the locus of hypersurfaces where this isomorphism fails (also known as the Noether-Lefschetz
21
+ locus), has been extensively studied [1–4, 11, 12].
22
+ It is therefore evident that the failure of
23
+ the Lefschetz hyperplane theorem can give rise to important questions in Hodge theory and
24
+ deformation theory. The goal of this article is to investigate the failure of this theorem in the
25
+ quasi-projective case.
26
+ It was observed by Hamm and Lˆe [6, 7] that if a hyperplane section X in a quasi-projective
27
+ variety Y is in “general” position, then (1.1) holds true. The criterion for general position, is
28
+ given explicitly in terms of a Whitney stratification of Y (see §2.2). This leads to the natural
29
+ question:
30
+ Question: Is the Hamm-Lˆe theorem (Theorem 2.1) true if we replace “hyperplane” by higher
31
+ degree hypersurface?
32
+ This is true in the case when Y is a projective, non-singular variety. Surprisingly, this can
33
+ fail even if Y is the complement of a single point in a projective space. In particular, we give
34
+ an example of a higher degree hypersurface which satisfies all the conditions in the Hamm-Lˆe
35
+ theorem except for being a hyperplane. Yet, in this case LHT fails. We now discuss this in
36
+ details. Recall, a projective variety X is called non-factorial if the rank of the divisor class
37
+ group Div(X) (i.e., the free abelian group of divisors on X modulo linear equivalence) is not
38
+ the same as the rank of the Picard group Pic(X). We prove:
39
+ Date: January 13, 2023.
40
+ 2020 Mathematics Subject Classification. 14C30, 32S35, 32S50.
41
+ Key words and phrases. Hodge theory, Lefschetz hyperplane theorem, quasi-projective varieties, factoriality,
42
+ Grothendieck-Lefschetz theorem, Picard group.
43
46
+ ANANYO DAN
47
+ Theorem 1.1. Let X ⊂ Pn be a non-factorial hypersurface with isolated singularities with
48
+ n ≥ 4. Denote by Xsing the singular locus of X. Then, the natural restriction morphism
49
+ H2(Pn\Xsing, Z) → H2(X\Xsing, Z)
50
+ is not surjective.
51
+ Using this theorem we now give an explicit example.
52
+ Example 1.2. Let X ⊂ P4 be a hypersurface defined by the equation X2
53
+ 0 + X2
54
+ 1 + X2
55
+ 2 + X2
56
+ 3,
57
+ where X0, ..., X4 are the coordinates on P4. Clearly, X has exactly one singular point x = [0 :
58
+ 0 : 0 : 0 : 1]. The divisor class group Div(X) is isomorphic to Z ⊕ Z (see [8, Ex. II.6.5]). By
59
+ Lefschetz hyperplane theorem, we have H2(X, Z) ∼= Z. Using the exponential exact sequence,
60
+ one can check that Pic(X) ∼= Z. Hence, X is non-factorial. Theorem 1.1 then implies that the
61
+ restriction morphism from H2(P4\{x}, Z) to H2(X\{x}, Z) is not surjective.
62
+ As an intermediate step we show that the Grothendieck-Lefschetz theorem [5] fails in the
63
+ quasi-projective case (see Remark 3.2).
64
+ Acknowledgement: I thank Dr. I. Kaur for discussions. The author was funded by EPSRC
65
+ grant number EP/T019379/1.
66
+ 2. On the Hamm-Lˆe result
67
+ In [7], Hamm and Lˆe proved a version of the Lefschetz hyperplane theorem for quasi-projective
68
+ varieties (see Theorem 2.1 below). The proof follows in two stages. We use notations as in §2.1
69
+ below. The first step is to check that for all i ≤ dim(Y ) − 2, Hi(Y \Z) (resp. Hm−1(Y \Z)) is
70
+ isomorphic to (resp. contained in) the i-th (resp. (m − 1)-th) cohomology of Vr(L) ∩ (Y \Z),
71
+ for some neighbourhood Vr(L) of L of “radius” r, for almost all r > 0 (see [7, Theorem 1.1.1]).
72
+ The second step is to check whether L ∩ (Y \Z) is a deformation retract of Vr(L) ∩ (Y \Z). One
73
+ observes that this holds true if L is in a “general” position. An explicit description of the general
74
+ position will be mentioned in Theorem 2.1 below.
75
+ 2.1. Setup. Let Y be a projective subvariety of dimension m in Pn, Z ⊂ Y be an algebraic
76
+ subspace and L ⊂ Pn a hyperplane in Pn such that Y \(Z ∪ L) is non-singular. Consider a
77
+ stratification {Yi}i∈I of Y satisfying the following conditions:
78
+ (1) each Yi is a real semi-algebraic subset of Y ,
79
+ (2) {Yi} is a Whitney stratification,
80
+ (3) Z is a union of some of the strata,
81
+ (4) the stratification satisfies the Thom condition for the following function:
82
+ τ : Y → R, sending y ∈ Y to
83
+ k�
84
+ i=1
85
+ |fi(y)|2d/di
86
+ n�
87
+ i=0
88
+ |yi|2d
89
+ , where y = (y1, ..., yn),
90
+ Z is defined by the homogeneous polynomials f1, ..., fk of degrees di, respectively and d
91
+ is the l.c.m. of the di’s. See [10, §1.4.4] for the precise definition.
92
+ 2.2. On the Hamm-Lˆe result. Let Ω be the set of complex projective hyperplanes of Pn
93
+ transverse to all the strata Yi.
94
+
95
+ LEFSCHETZ THEOREM
96
+ 3
97
+ Theorem 2.1. (Hamm-Lˆe [7, Theorem 1.1.3]) Assume that Y \Z is non-singular. Then, for any
98
+ L ∈ Ω we have
99
+ Hk(Y \Z, L ∩ (Y \Z)) = 0 for all k ≤ m − 1.
100
+ In other words, the natural morphism from Hk(Y \Z, Z) to Hk(L∩(Y \Z), Z) is an isomorphism
101
+ for all k ≤ m − 2 and injective for k = m − 1.
102
+ We now write the stratification relevant to Example 1.2.
103
+ Remark 2.2. Take Y = P4 ⊂ P5 defined by z5 = 0, where zi are the coordinates on P5. Take
104
+ Z := [0, 0, 0, 0, 1, 0] the closed point in Y . Take the stratification of Y consisting of
105
+ (Y \Z)
106
+
107
+ Z.
108
+ Then, the equations defining Z in P5 are given by fi := zi for 0 ≤ i ≤ 3 and f5 := z5. The
109
+ function τ is simply
110
+ τ :=
111
+ |z5|2 +
112
+ 3�
113
+ i=0
114
+ |zi|2
115
+ 5�
116
+ i=0
117
+ |zi|2
118
+ .
119
+ Note that this stratification satisfies conditions (1)-(4) in §2.1 above, with the stratification on R
120
+ given by R\{0} �{0}. Finally, note that the hypersurface X in P5 defined by z2
121
+ 0 +z2
122
+ 1 +z2
123
+ 2 +z2
124
+ 3+z2
125
+ 5
126
+ is singular at the point Z. As a result X is transverse to all the strata of Y . We will observe in
127
+ Theorem 1.1 that if we replace L in Theorem 2.1 above by X, then the conclusion fails.
128
+ 3. Proof of Main theorem
129
+ We will assume that the reader has basic familiarity with local cohomology. See [9] for basic
130
+ definitions and results in this topic.
131
+ Let X ⊂ Pn be a non-factorial hypersurface with isolated singularities with n ≥ 4. Denote by
132
+ Xsing the singular locus of X, Y := Pn\Xsing and Xsm := X\Xsing. We first show:
133
+ Proposition 3.1. The cohomology groups H1(OY ), H2(OY ) and H1(OXsm) all vanish, in both
134
+ analytic as well as Zariski topology.
135
+ Proof. Recall, the long exact sequence for local cohomology groups, which exists in both topolo-
136
+ gies (see [9, Corollary 1.9]):
137
+ ... → H1(OPn) → H1(OY ) → H2
138
+ Xsing(OPn) → H2(OPn) → H2(OY ) → H3
139
+ Xsing(OPn) → ...
140
+ Recall, H1(OPn) = 0 = H2(OPn). By Serre’s GAGA, H1(O
141
+ an
142
+ Pn) = 0 = H2(O
143
+ an
144
+ Pn). To prove the
145
+ vanishing of H1(OY ) and H2(OY ), we simply need to prove the vanishing of Hi
146
+ Xsing(OPn) for
147
+ i = 2, 3 in both topologies.
148
+ Consider the spectral sequence (see [9, Proposition 1.4]):
149
+ Ep,q
150
+ 2
151
+ = Hp(Pn, Hq
152
+ Xsing(OPn)) ⇒ Hp+q
153
+ Xsing(OPn).
154
+ (3.1)
155
+ We are interested in the cases when p + q equals 2 or 3. Since n ≥ 4 and Xsing are closed points,
156
+ we have (see [13, Proposition 1.2])
157
+ Hq
158
+ Xsing(OPn) = 0 for q ≤ 3.
159
+ This implies that Ep,q
160
+ 2
161
+ = 0 for p + q equals 2 or 3. Hence the spectral sequence degenerates at
162
+ E2 in this case and Hi
163
+ Xsing(OPn) = 0 in both topologies. This proves the vanishing of H1(OY )
164
+ and H2(OY ).
165
+
166
+ 4
167
+ ANANYO DAN
168
+ The proof for the vanishing of H1(OXsm) follows similarly. In particular, using [9, Corollary
169
+ 1.9], it suffices to check the vanishing of H1(OX) and H2
170
+ Xsing(OX). Since X is a hypersurface
171
+ in Pn and n ≥ 4, H1(OX) = 0.
172
+ By Serre’s GAGA, H1(O
173
+ an
174
+ X ) = 0. To prove the vanishing
175
+ of H2
176
+ sing(OX) use the spectral sequence (3.1) above after replacing Pn by X and p + q = 2.
177
+ Since dim X ≥ 3, [13, Proposition 1.2] implies that Hq
178
+ Xsing(OX) = 0 for q ≤ 2. This implies
179
+ that the spectral sequence degenerates at E2 and H2
180
+ Xsing(OX) = 0 in both topologies. Hence,
181
+ H1(OXsm) = 0 in both topologies. This proves the proposition.
182
+
183
+ Proof of the main theorem. We prove the theorem by contradiction. Suppose that the restric-
184
+ tion morphism from H2(Y, Z) to H2(X, Z) is surjective. Comparing the long exact sequences
185
+ associated to the exponential exact sequence for Y and Xsm we get the following diagram where
186
+ the horizontal rows are exact:
187
+ H1(OY )
188
+ ✲ H1(O∗
189
+ Y )
190
+ ∂1✲ H2(Y, Z)
191
+ ✲ H2(OY )
192
+
193
+
194
+
195
+ H1(OXsm)
196
+
197
+ ✲ H1(O∗
198
+ Xsm)
199
+ ρ′
200
+
201
+ ∂2✲ H2(Xsm, Z)
202
+ ρ
203
+
204
+ ✲ H2(OXsm)
205
+
206
+ (3.2)
207
+ Using the vanishing results from Proposition 3.1, we conclude that ∂1 is an isomorphism and ∂2
208
+ is injective. By assumption, ρ is surjective. We claim that ρ′ is surjective. Indeed, given α ∈
209
+ H1(O∗
210
+ Xsm), the surjectivity of ρ implies that there exists β ∈ H2(Y, Z) such that ρ(β) = ∂2(α).
211
+ Since ∂1 is an isomorphism, there exist α′ ∈ H1(O∗
212
+ Y ) mapping to β via ∂1. Using the injectivity
213
+ of ∂2 and the commutativity of the middle square, we have ρ′(α′) = α. This proves the claim.
214
+ Since ρ′ is surjective, we have the following surjective morphism:
215
+ Z = Pic(Pn) ∼= Pic(Y )
216
+ ρ′
217
+ ։ Pic(Xsm) ∼= Div(X)
218
+ (3.3)
219
+ where the second and the last isomorphisms follow from the fact that Xsing is of codimensional
220
+ at least 2 in X and Pn. By Lefschetz hyperplane theorem, we have H2(X, Z) ∼= H2(Pn, Z) = Z,
221
+ generated by the class of the hyperplane section. Note that, H1(OX) and H2(OX) vanish (use [8,
222
+ Ex. III.5.5] and n ≥ 4). Using the exponential short exact sequence for X, we conclude that
223
+ Pic(X) ∼= Z. Combining with (3.3), this implies rk Div(X) = rk Pic(X). But this contradicts
224
+ the fact that X is non-factorial. Hence, the restriction morphism from H2(Y, Z) to H2(X, Z)
225
+ cannot be surjective. This proves the theorem.
226
+
227
+ Remark 3.2. Let X be as in Theorem 1.1. Then, the restriction morphism
228
+ Pic(Pn\Xsing) → Pic(X\Xsing)
229
+ is not surjective. Indeed,
230
+ Pic(Pn\Xsing) ∼= Pic(Pn) ∼= Z and Pic(X\Xsing) ∼= Div(X).
231
+ By Lefschetz hyperplane theorem for projective hypersurfaces, we have Pic(X) ∼= Z. Since X is
232
+ non-factorial, the rank of Div(X) is not the same as that of Pic(X). Therefore, Pic(Pn\Xsing)
233
+ cannot be isomorphic to Pic(X\Xsing).
234
+ References
235
+ [1] C. Ciliberto, J. Harris, and R. Miranda. General components of the Noether-Lefschetz locus and their density
236
+ in the space of all surfaces. Mathematische Annalen, 282(4):667–680, 1988.
237
+ [2] A. Dan. On a conjecture by Griffiths and Harris concerning certain Noether–Lefschetz loci. Communications
238
+ in Contemporary Mathematics, 17(5):1550002, 2015.
239
+ [3] A. Dan. On a conjecture of Harris. Communications in Contemporary Mathematics, 23(07):2050028, 2021.
240
+
241
+ LEFSCHETZ THEOREM
242
+ 5
243
+ [4] M. Green. A new proof of the explicit Noether-Lefschetz theorem. J. Differential Geometry, 27:155–159, 1988.
244
+ [5] A. Grothendieck. SGA 2. S´eminaire de G´eom´etrie Alg´ebrique du Bois Marie-1962-Cohomologie locale des
245
+ faisceaux coh´erents et th´eoremes de Lefschetz locaux et globaux (North-Holland, Amsterdam), 1968.
246
+ [6] H. Hamm. Lefschetz theorems for singular varieties. In Proceedings of symposia in pure mathematics, vol-
247
+ ume 40, pages 547–557. AMS, 1983.
248
+ [7] H. Hamm and D. T. Lˆe. Lefschetz theorems on quasi-projective varieties. Bulletin de la Soci´et´e math´ematique
249
+ de France, 113:123–142, 1985.
250
+ [8] R. Hartshorne. Algebraic Geometry. Graduate text in Mathematics-52. Springer-Verlag, 1977.
251
+ [9] R. Hartshorne. Local Cohomology: A Seminar Given by A. Groethendieck, Harvard University. Fall, 1961,
252
+ volume 41. Springer, 2006.
253
+ [10] D. T. Lˆe and B. Teissier. Cycles ´evanescents, sections planes et conditions de whitney ii, singularities, part
254
+ 2 (arcata, calif., 1981), 65-103. In Proc. Sympos. Pure Math, volume 40.
255
+ [11] C. Voisin. Une pr´ecision concernant le th´eor`eme de Noether. Math. Ann., 280(4):605–611, 1988.
256
+ [12] C. Voisin. Sur le lieu de Noether-Lefschetz en degr´es 6 et 7. Compositio Mathematica, 75(1):47–68, 1990.
257
+ [13] Y. Yoshino. Maximal Cohen-Macaulay Modules Over Cohen-Macaulay Rings, volume 146. Cambridge Uni-
258
+ versity Press, 1990.
259
+ School of Mathematics and Statistics, University of Sheffield, Hicks building, Hounsfield Road,
260
+ S3 7RH, UK
261
+ Email address: a.dan@sheffield.ac.uk
262
+
TdE4T4oBgHgl3EQfLwwK/content/tmp_files/load_file.txt ADDED
@@ -0,0 +1,267 @@
 
 
 
 
 
1
+ filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf,len=266
2
+ page_content='arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
3
+ page_content='04940v1 [math.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
4
+ page_content='AG] 12 Jan 2023 FAILURE OF LEFSCHETZ HYPERPLANE THEOREM ANANYO DAN Abstract.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
5
+ page_content=' In this article, we give a counterexample to the Lefschetz hyperplane theorem for non-singular quasi-projective varieties.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
6
+ page_content=' A classical result of Hamm-Lˆe shows that Lefschetz hyperplane theorem can hold for hyperplanes in general position.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
7
+ page_content=' We observe that the condition of “hyperplane” is strict in the sense that it is not possible to replace it by higher degree hypersurfaces.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
8
+ page_content=' The counterexample is very simple: projective space minus finitely many points.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
9
+ page_content=' Moreover, as an intermediate step we prove that the Grothendieck-Lefschetz theorem also fails in the quasi-projective case.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
10
+ page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
11
+ page_content=' Introduction The underlying field will always be C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
12
+ page_content=' Consider a non-singular, projective variety Y of dimension n.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
13
+ page_content=' The Lefschetz hyperplane theorem (LHT) states that for any hypersurface X ⊂ Y with OX(Y ) very ample, the restriction morphism Hk(Y, Z) → Hk(X, Z) is an isomorphism for all k < n − 1 and injective for k = n − 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
14
+ page_content=' (1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
15
+ page_content='1) If Y is the projective space, then the theorem extends further.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
16
+ page_content=' In particular, the restriction from Hn−1(Pn) to Hn−1(X) is an isomorphism for a very general hypersurface X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
17
+ page_content=' The geometry of the locus of hypersurfaces where this isomorphism fails (also known as the Noether-Lefschetz locus), has been extensively studied [1–4, 11, 12].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
18
+ page_content=' It is therefore evident that the failure of the Lefschetz hyperplane theorem can give rise to important questions in Hodge theory and deformation theory.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
19
+ page_content=' The goal of this article is to investigate the failure of this theorem in the quasi-projective case.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
20
+ page_content=' It was observed by Hamm and Lˆe [6, 7] that if a hyperplane section X in a quasi-projective variety Y is in “general” position, then (1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
21
+ page_content='1) holds true.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
22
+ page_content=' The criterion for general position, is given explicitly in terms of a Whitney stratification of Y (see §2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
23
+ page_content='2).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
24
+ page_content=' This leads to the natural question: Question: Is the Hamm-Lˆe theorem (Theorem 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
25
+ page_content='1) true if we replace “hyperplane” by higher degree hypersurface?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
26
+ page_content=' This is true in the case when Y is a projective, non-singular variety.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
27
+ page_content=' Surprisingly, this can fail even if Y is the complement of a single point in a projective space.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
28
+ page_content=' In particular, we give an example of a higher degree hypersurface which satisfies all the conditions in the Hamm-Lˆe theorem except for being a hyperplane.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
29
+ page_content=' Yet, in this case LHT fails.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
30
+ page_content=' We now discuss this in details.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
31
+ page_content=' Recall, a projective variety X is called non-factorial if the rank of the divisor class group Div(X) (i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
32
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
33
+ page_content=', the free abelian group of divisors on X modulo linear equivalence) is not the same as the rank of the Picard group Pic(X).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
34
+ page_content=' We prove: Date: January 13, 2023.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
35
+ page_content=' 2020 Mathematics Subject Classification.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
36
+ page_content=' 14C30, 32S35, 32S50.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
37
+ page_content=' Key words and phrases.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
38
+ page_content=' Hodge theory, Lefschetz hyperplane theorem, quasi-projective varieties, factoriality, Grothendieck-Lefschetz theorem, Picard group.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
39
+ page_content=' 1 2 ANANYO DAN Theorem 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
40
+ page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
41
+ page_content=' Let X ⊂ Pn be a non-factorial hypersurface with isolated singularities with n ≥ 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
42
+ page_content=' Denote by Xsing the singular locus of X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
43
+ page_content=' Then, the natural restriction morphism H2(Pn\\Xsing, Z) → H2(X\\Xsing, Z) is not surjective.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
44
+ page_content=' Using this theorem we now give an explicit example.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
45
+ page_content=' Example 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
46
+ page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
47
+ page_content=' Let X ⊂ P4 be a hypersurface defined by the equation X2 0 + X2 1 + X2 2 + X2 3, where X0, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
48
+ page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
49
+ page_content=', X4 are the coordinates on P4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
50
+ page_content=' Clearly, X has exactly one singular point x = [0 : 0 : 0 : 0 : 1].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
51
+ page_content=' The divisor class group Div(X) is isomorphic to Z ⊕ Z (see [8, Ex.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
52
+ page_content=' II.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
53
+ page_content='6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
54
+ page_content='5]).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
55
+ page_content=' By Lefschetz hyperplane theorem, we have H2(X, Z) ∼= Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
56
+ page_content=' Using the exponential exact sequence, one can check that Pic(X) ∼= Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
57
+ page_content=' Hence, X is non-factorial.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
58
+ page_content=' Theorem 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
59
+ page_content='1 then implies that the restriction morphism from H2(P4\\{x}, Z) to H2(X\\{x}, Z) is not surjective.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
60
+ page_content=' As an intermediate step we show that the Grothendieck-Lefschetz theorem [5] fails in the quasi-projective case (see Remark 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
61
+ page_content='2).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
62
+ page_content=' Acknowledgement: I thank Dr.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
63
+ page_content=' I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
64
+ page_content=' Kaur for discussions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
65
+ page_content=' The author was funded by EPSRC grant number EP/T019379/1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
66
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
67
+ page_content=' On the Hamm-Lˆe result In [7], Hamm and Lˆe proved a version of the Lefschetz hyperplane theorem for quasi-projective varieties (see Theorem 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
68
+ page_content='1 below).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
69
+ page_content=' The proof follows in two stages.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
70
+ page_content=' We use notations as in §2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
71
+ page_content='1 below.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
72
+ page_content=' The first step is to check that for all i ≤ dim(Y ) − 2, Hi(Y \\Z) (resp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
73
+ page_content=' Hm−1(Y \\Z)) is isomorphic to (resp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
74
+ page_content=' contained in) the i-th (resp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
75
+ page_content=' (m − 1)-th) cohomology of Vr(L) ∩ (Y \\Z), for some neighbourhood Vr(L) of L of “radius” r, for almost all r > 0 (see [7, Theorem 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
76
+ page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
77
+ page_content='1]).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
78
+ page_content=' The second step is to check whether L ∩ (Y \\Z) is a deformation retract of Vr(L) ∩ (Y \\Z).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
79
+ page_content=' One observes that this holds true if L is in a “general” position.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
80
+ page_content=' An explicit description of the general position will be mentioned in Theorem 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
81
+ page_content='1 below.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
82
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
83
+ page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
84
+ page_content=' Setup.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
85
+ page_content=' Let Y be a projective subvariety of dimension m in Pn, Z ⊂ Y be an algebraic subspace and L ⊂ Pn a hyperplane in Pn such that Y \\(Z ∪ L) is non-singular.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
86
+ page_content=' Consider a stratification {Yi}i∈I of Y satisfying the following conditions: (1) each Yi is a real semi-algebraic subset of Y , (2) {Yi} is a Whitney stratification, (3) Z is a union of some of the strata, (4) the stratification satisfies the Thom condition for the following function: τ : Y → R, sending y ∈ Y to (Σ_{i=1}^{k} |fi(y)|^{2d/di}) / (Σ_{i=0}^{n} |yi|^{2d}), where y = (y1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
87
+ page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
88
+ page_content=', yn), Z is defined by the homogeneous polynomials f1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
89
+ page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
90
+ page_content=', fk of degrees di, respectively and d is the l.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
91
+ page_content='c.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
92
+ page_content='m.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
93
+ page_content=' of the di’s.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
94
+ page_content=' See [10, §1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
95
+ page_content='4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
96
+ page_content='4] for the precise definition.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
97
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
98
+ page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
99
+ page_content=' On the Hamm-Lˆe result.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
100
+ page_content=' Let Ω be the set of complex projective hyperplanes of Pn transverse to all the strata Yi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
101
+ page_content=' LEFSCHETZ THEOREM 3 Theorem 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
102
+ page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
103
+ page_content=' (Hamm-Lˆe [7, Theorem 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
104
+ page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
105
+ page_content='3]) Assume that Y \\Z is non-singular.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
106
+ page_content=' Then, for any L ∈ Ω we have Hk(Y \\Z, L ∩ (Y \\Z)) = 0 for all k ≤ m − 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
107
+ page_content=' In other words, the natural morphism from Hk(Y \\Z, Z) to Hk(L∩(Y \\Z), Z) is an isomorphism for all k ≤ m − 2 and injective for k = m − 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
108
+ page_content=' We now write the stratification relevant to Example 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
109
+ page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
110
+ page_content=' Remark 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
111
+ page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
112
+ page_content=' Take Y = P4 ⊂ P5 defined by z5 = 0, where zi are the coordinates on P5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
113
+ page_content=' Take Z := [0, 0, 0, 0, 1, 0] the closed point in Y .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
114
+ page_content=' Take the stratification of Y consisting of (Y \\Z) � Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
115
+ page_content=' Then, the equations defining Z in P5 are given by fi := zi for 0 ≤ i ≤ 3 and f5 := z5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
116
+ page_content=' The function τ is simply τ := (|z5|^2 + Σ_{i=0}^{3} |zi|^2) / (Σ_{i=0}^{5} |zi|^2).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
117
+ page_content=' Note that this stratification satisfies conditions (1)-(4) in §2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
118
+ page_content='1 above, with the stratification on R given by R\\{0} �{0}.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
119
+ page_content=' Finally, note that the hypersurface X in P5 defined by z2 0 +z2 1 +z2 2 +z2 3+z2 5 is singular at the point Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
120
+ page_content=' As a result X is transverse to all the strata of Y .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
121
+ page_content=' We will observe in Theorem 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
122
+ page_content='1 that if we replace L in Theorem 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
123
+ page_content='1 above by X, then the conclusion fails.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
124
+ page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
125
+ page_content=' Proof of Main theorem We will assume that the reader has basic familiarity with local cohomology.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
126
+ page_content=' See [9] for basic definitions and results in this topic.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
127
+ page_content=' Let X ⊂ Pn be a non-factorial hypersurface with isolated singularities with n ≥ 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
128
+ page_content=' Denote by Xsing the singular locus of X, Y := Pn\\Xsing and Xsm := X\\Xsing.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
129
+ page_content=' We first show: Proposition 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
130
+ page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
131
+ page_content=' The cohomology groups H1(OY ), H2(OY ) and H1(OXsm) all vanish, in both analytic as well as Zariski topology.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
132
+ page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
133
+ page_content=' Recall, the long exact sequence for local cohomology groups, which exists in both topolo- gies (see [9, Corollary 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
134
+ page_content='9]): .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
135
+ page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
136
+ page_content=' → H1(OPn) → H1(OY ) → H2 Xsing(OPn) → H2(OPn) → H2(OY ) → H3 Xsing(OPn) → .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
137
+ page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
138
+ page_content=' Recall, H1(OPn) = 0 = H2(OPn).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
139
+ page_content=' By Serre’s GAGA, H1(O an Pn) = 0 = H2(O an Pn).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
140
+ page_content=' To prove the vanishing of H1(OY ) and H2(OY ), we simply need to prove the vanishing of Hi Xsing(OPn) for i = 2, 3 in both topologies.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
141
+ page_content=' Consider the spectral sequence (see [9, Proposition 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
142
+ page_content='4]): Ep,q 2 = Hp(Pn, Hq Xsing(OPn)) ⇒ Hp+q Xsing(OPn).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
143
+ page_content=' (3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
144
+ page_content='1) We are interested in the cases when p + q equals 2 or 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
145
+ page_content=' Since n ≥ 4 and Xsing are closed points, we have (see [13, Proposition 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
146
+ page_content='2]) Hq Xsing(OPn) = 0 for q ≤ 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
147
+ page_content=' This implies that Ep,q 2 = 0 for p + q equals 2 or 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
148
+ page_content=' Hence the spectral sequence degenerates at E2 in this case and Hi Xsing(OPn) = 0 in both topologies.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
149
+ page_content=' This proves the vanishing of H1(OY ) and H2(OY ).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
150
+ page_content=' 4 ANANYO DAN The proof for the vanishing of H1(OXsm) follows similarly.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
151
+ page_content=' In particular, using [9, Corollary 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
152
+ page_content='9], it suffices to check the vanishing of H1(OX) and H2 Xsing(OX).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
153
+ page_content=' Since X is a hypersurface in Pn and n ≥ 4, H1(OX) = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
154
+ page_content=' By Serre’s GAGA, H1(O an X ) = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
155
+ page_content=' To prove the vanishing of H2 sing(OX) use the spectral sequence (3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
156
+ page_content='1) above after replacing Pn by X and p + q = 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
157
+ page_content=' Since dim X ≥ 3, [13, Proposition 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
158
+ page_content='2] implies that Hq Xsing(OX) = 0 for q ≤ 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
159
+ page_content=' This implies that the spectral sequence degenerates at E2 and H2 Xsing(OX) = 0 in both topologies.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
160
+ page_content=' Hence, H1(OXsm) = 0 in both topologies.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
161
+ page_content=' This proves the proposition.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
162
+ page_content=' □ Proof of the main theorem.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
163
+ page_content=' We prove the theorem by contradiction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
164
+ page_content=' Suppose that the restric- tion morphism from H2(Y, Z) to H2(X, Z) is surjective.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
165
+ page_content=' Comparing the long exact sequences associated to the exponential exact sequence for Y and Xsm we get the following diagram where the horizontal rows are exact: H1(OY ) ✲ H1(O∗ Y ) ∂1✲ H2(Y, Z) ✲ H2(OY ) ⟲ ⟲ ⟲ H1(OXsm) ❄ ✲ H1(O∗ Xsm) ρ′ ❄ ∂2✲ H2(Xsm, Z) ρ ❄ ✲ H2(OXsm) ❄ (3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
166
+ page_content='2) Using the vanishing results from Proposition 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
167
+ page_content='1, we conclude that ∂1 is an isomorphism and ∂2 is injective.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
168
+ page_content=' By assumption, ρ is surjective.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
169
+ page_content=' We claim that ρ′ is surjective.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
170
+ page_content=' Indeed, given α ∈ H1(O∗ Xsm), the surjectivity of ρ implies that there exists β ∈ H2(Y, Z) such that ρ(β) = ∂2(α).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
171
+ page_content=' Since ∂1 is an isomorphism, there exist α′ ∈ H1(O∗ Y ) mapping to β via ∂1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
172
+ page_content=' Using the injectivity of ∂2 and the commutativity of the middle square, we have ρ′(α′) = α.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
173
+ page_content=' This proves the claim.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
174
+ page_content=' Since ρ′ is surjective, we have the following surjective morphism: Z = Pic(Pn) ∼= Pic(Y ) ρ′ ։ Pic(Xsm) ∼= Div(X) (3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
175
+ page_content='3) where the second and the last isomorphisms follow from the fact that Xsing is of codimensional at least 2 in X and Pn.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
176
+ page_content=' By Lefschetz hyperplane theorem, we have H2(X, Z) ∼= H2(Pn, Z) = Z, generated by the class of the hyperplane section.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
177
+ page_content=' Note that, H1(OX) and H2(OX) vanish (use [8, Ex.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
178
+ page_content=' III.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
179
+ page_content='5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
180
+ page_content='5] and n ≥ 4).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
181
+ page_content=' Using the exponential short exact sequence for X, we conclude that Pic(X) ∼= Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
182
+ page_content=' Combining with (3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
183
+ page_content='3), this implies rk Div(X) = rk Pic(X).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
184
+ page_content=' But this contradicts the fact that X is non-factorial.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
185
+ page_content=' Hence, the restriction morphism from H2(Y, Z) to H2(X, Z) cannot be surjective.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
186
+ page_content=' This proves the theorem.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
187
+ page_content=' □ Remark 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
188
+ page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
189
+ page_content=' Let X be as in Theorem 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
190
+ page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
191
+ page_content=' Then, the restriction morphism Pic(Pn\\Xsing) → Pic(X\\Xsing) is not surjective.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
192
+ page_content=' Indeed, Pic(Pn\\Xsing) ∼= Pic(Pn) ∼= Z and Pic(X\\Xsing) ∼= Div(X).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
193
+ page_content=' By Lefschetz hyperplane theorem for projective hypersurfaces, we have Pic(X) ∼= Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
194
+ page_content=' Since X is non-factorial, the rank of Div(X) is not the same as that of Pic(X).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
195
+ page_content=' Therefore, Pic(Pn\\Xsing) cannot be isomorphic to Pic(X\\Xsing).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
196
+ page_content=' References [1] C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
197
+ page_content=' Ciliberto, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
198
+ page_content=' Harris, and R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
199
+ page_content=' Miranda.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
200
+ page_content=' General components of the Noether-Lefschetz locus and their density in the space of all surfaces.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
201
+ page_content=' Mathematische Annalen, 282(4):667–680, 1988.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
202
+ page_content=' [2] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
203
+ page_content=' Dan.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
204
+ page_content=' On a conjecture by Griffiths and Harris concerning certain Noether–Lefschetz loci.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
205
+ page_content=' Communications in Contemporary Mathematics, 17(5):1550002, 2015.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
206
+ page_content=' [3] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
207
+ page_content=' Dan.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
208
+ page_content=' On a conjecture of Harris.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
209
+ page_content=' Communications in Contemporary Mathematics, 23(07):2050028, 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
210
+ page_content=' LEFSCHETZ THEOREM 5 [4] M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
211
+ page_content=' Green.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
212
+ page_content=' A new proof of the explicit Noether-Lefschetz theorem.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
213
+ page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
214
+ page_content=' Differential Geometry, 27:155–159, 1988.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
215
+ page_content=' [5] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
216
+ page_content=' Grothendieck.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
217
+ page_content=' SGA 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
218
+ page_content=' S´eminaire de G´eom´etrie Alg´ebrique du Bois Marie-1962-Cohomologie locale des faisceaux coh´erents et th´eoremes de Lefschetz locaux et globaux (North-Holland, Amsterdam), 1968.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
219
+ page_content=' [6] H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
220
+ page_content=' Hamm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
221
+ page_content=' Lefschetz theorems for singular varieties.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
222
+ page_content=' In Proceedings of symposia in pure mathematics, vol- ume 40, pages 547–557.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
223
+ page_content=' AMS, 1983.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
224
+ page_content=' [7] H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
225
+ page_content=' Hamm and D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
226
+ page_content=' T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
227
+ page_content=' Lˆe.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
228
+ page_content=' Lefschetz theorems on quasi-projective varieties.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
229
+ page_content=' Bulletin de la Soci´et´e math´ematique de France, 113:123–142, 1985.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
230
+ page_content=' [8] R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
231
+ page_content=' Hartshorne.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
232
+ page_content=' Algebraic Geometry.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
233
+ page_content=' Graduate text in Mathematics-52.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
234
+ page_content=' Springer-Verlag, 1977.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
235
+ page_content=' [9] R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
236
+ page_content=' Hartshorne.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
237
+ page_content=' Local Cohomology: A Seminar Given by A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
238
+ page_content=' Groethendieck, Harvard University.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
239
+ page_content=' Fall, 1961, volume 41.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
240
+ page_content=' Springer, 2006.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
241
+ page_content=' [10] D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
242
+ page_content=' T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
243
+ page_content=' Lˆe and B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
244
+ page_content=' Teissier.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
245
+ page_content=' Cycles ´evanescents, sections planes et conditions de whitney ii, singularities, part 2 (arcata, calif.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
246
+ page_content=', 1981), 65-103.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
247
+ page_content=' In Proc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
248
+ page_content=' Sympos.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
249
+ page_content=' Pure Math, volume 40.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
250
+ page_content=' [11] C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
251
+ page_content=' Voisin.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
252
+ page_content=' Une pr´ecision concernant le th´eor`eme de Noether.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
253
+ page_content=' Math.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
254
+ page_content=' Ann.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
255
+ page_content=', 280(4):605–611, 1988.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
256
+ page_content=' [12] C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
257
+ page_content=' Voisin.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
258
+ page_content=' Sur le lieu de Noether-Lefschetz en degr´es 6 et 7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
259
+ page_content=' Compositio Mathematica, 75(1):47–68, 1990.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
260
+ page_content=' [13] Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
261
+ page_content=' Yoshino.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
262
+ page_content=' Maximal Cohen-Macaulay Modules Over Cohen-Macaulay Rings, volume 146.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
263
+ page_content=' Cambridge Uni- versity Press, 1990.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
264
+ page_content=' School of Mathematics and Statistics, University of Sheffield, Hicks building, Hounsfield Road, S3 7RH, UK Email address: a.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
265
+ page_content='dan@sheffield.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
266
+ page_content='ac.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
267
+ page_content='uk' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdE4T4oBgHgl3EQfLwwK/content/2301.04940v1.pdf'}
XdE0T4oBgHgl3EQfmgEE/content/tmp_files/2301.02498v1.pdf.txt ADDED
@@ -0,0 +1,3030 @@
1
+ Dynamics and stability of the two-body problem with Yukawa
2
+ correction to Newton’s gravity, revisited and applied
3
+ numerically to the solar system
4
+ Nawras Abo Hasan1∗, Nabil Joudieh1†and Nidal Chamoun2‡
5
+ 1 Physics Department, Damascus University, Damascus, Syria
6
+ 2 Physics Department, HIAST, P.O. Box 31983, Damascus, Syria
7
+ Abstract
8
+ In this manuscript, we review the motion of two-body celestial system (planet-sun) for
9
+ a Yukawa-type correction on Newton’s gravitational potential using Hamilton’s formulation.
10
+ We reexamine the stability using the corresponding linearization Jacobian matrix, and verify
11
+ that Bertrand’s theorem conditions are met for radii ≪ 10^15 m, and so bound closed orbits
12
+ are expected. Applied to the solar system, we present the equation of motion of the planet,
13
+ then solve it both analytically and numerically. Making use of the analytical expression of
14
+ the orbit, we estimate the Yukawa strength α, and find it larger than the nominal value
15
+ (10−8) adopted in previous studies, in that it is of order (α = 10−4 − 10−5) for terrestrial
16
+ planets (Mercury, Venus, Earth, Mars and Pluto) whereas it is even larger (α = 10^−3) for
+ the Giant planets (Jupiter, Saturn, Uranus and Neptune). Taking as inputs (rmin, vmax, e)
18
+ observed by NASA, we analyse the orbits analytically and numerically for both the estimated
19
+ and nominal values of α, and determine the corresponding trajectories. For each obtained
20
+ orbit we recalculate the characterizing parameters (rmin, rmax, a, b, e) and compare their
21
+ values according to the used potential (Newton with/without Yukawa correction) and to the
22
+ method used (analytical and/or numerical). When compared to the observational data, we
23
+ conclude that the correction on the path due to Yukawa correction is of order of and up to
24
+ 80 million km (20 million km) as a maximum deviation occurring for Neptune (Pluto) for
25
+ nominal (estimated) value of α.
26
+ Keywords: gravitational two-body problem, Yukawa potential, closed orbit
27
+
28
+ 0
29
+ Introduction
30
+ The past several years have witnessed a resurgence of interest in experimental testing of gravity,
31
+ particularly in the possibility of deviations from the predictions of Newtonian gravity, which is
32
+ considered as an excellent approximation of General Relativity (GR) on large distance scale [1].
33
+ Many theoretical models suggest the existence of new, relatively weak, intermediate-range force
34
+ coexisting with gravity such that the net resulting interaction would behave like a new correction
35
+ ∗seagull18990@gmail.com
36
+ †njoudieh@yahoo.fr
37
+ ‡nidal.chamoun@hiast.edu.sy
38
+ 1
39
+ arXiv:2301.02498v1 [astro-ph.EP] 6 Jan 2023
40
+
41
+ to the potentials defining the gravitational field. It is known [2] that there are only two types
42
+ of central potentials, namely the Newton 1/r and the Harmonic r² potentials, where ANY finite
+ motion of an object, subject to this central potential, leads to a closed path (Bertrand’s theorem).
44
+ There are some ‘exceptions’ to this statement, in the sense that there might be closed bound
45
+ trajectories for a central potential different from the Newton and Harmonic ones, which have
46
+ been studied in [3, 4]. In this contribution, we revisit the effect of a Yukawa correction to the
47
+ gravitational force over large distances.
48
+ Theories of massive gravity [5,6], adding a mass term to the graviton (the carrier of gravity),
49
+ have raised a wide interest and the Yukawa potential is the popular parametrization of such
50
+ theories. Actually, many works describing deviations from Newton’s inverse square law have
51
+ addressed the Yukawa-type correction. Assuming gravity is exerted by exchanges of gravitons,
52
+ it is clear that a test for the graviton mass (µg) is to ask whether the Newton (1/r) potential
53
+ shows any evidence of dying at large distances because of Yukawa exponential cutoff (e−µgr).
54
+ Since the seventies [7], bounds on the graviton’s mass (µg ≤ 1.1 × 10^−29 eV) were used to
+ put a bound on its Compton wavelength, considered as a distance scale for the Yukawa correction
+ (λ ∼ 2π/µg ≥ 3.7 Mpc). The authors of [8] gave a bound on the Yukawa range (λ) in the order of
58
+ (101 − 104 AU) corresponding to (µg ≤ 10−24) eV.
59
+ Theories like Scalar-Tensor-Vector Gravity Theory [9] predict a Yukawa-like fifth force. The
60
+ authors of [10], showed that screened modified gravity can suppress the fifth force in dense regions
61
+ and allow theories to evade the solar system and laboratory tests of the weak equivalence princi-
62
+ ple. In [11], an extended theory of gravity, with a modified potential including post-Newtonian
63
+ terms, whose expansion is different from that of Yukawa correction, called ‘vacuum bootstrapped
64
+ Newtonian gravity’, was subjected to solar system tests, through a procedure which was applied
65
+ to Yukawa corrections at the Galactic center [12], with no significant deviations from GR found.
66
+ In [13], a Keplerian-type parametrization was shown as a solution of the equations of motion
67
+ for a Yukawa-type potential between two bodies. In fact, the two-body solution for alternative
68
+ theories yield a strong constraint for solar system [14, 15], whereas several analyses of Yukawa
69
+ potential for a 2-body system in different contexts were carried out [16, 17]. The orbit of a
70
+ single particle moving under Yukawa potential was studied in [18], and the precessing ellipse
71
+ type orbits were observed. In [19], it was noted that the modified gravity with Yukawa-like
72
+ long-range potential was (un)successful on astrophysical scales (in solar system), whereas an
73
+ analysis of Yukawa potential in f(R) gravity was given in [20]. The work of [21] showed that
74
+ a Yukawa fifth force is expected to be sub-dominant in satellite dynamics and space geodesy
75
+ experiments, as long as they are performed at altitudes greater than a few hundred kilometres.
76
+ The Yukawa strength was estimated in [22] to be (α < 10−5-10−8) for distances of order 109
77
+ cm, whereas the use of laser data from LAGEOS satellites yield a constraint on α of the order
78
+ of 10−12.
79
+ In this letter, we build on work from [23], in which the dynamics and stability of the two body
80
+ problem with a Newtonian potential corrected by a Yukawa term were explored. In particular,
81
+ we reproduced their analytical results and applied them to the study of all the planets of our solar
82
+ system. Solving analytically the planet equation of motion, one finds an elliptical trajectory,
83
+ which one can also obtain numerically using Runge-Kutta method. Starting from the observed
84
+ values of the perihelion distance and velocity (rmin, vmax) and of the trajectory eccentricity e,
+ stated in NASA public results [24]∗, one could determine the ellipse equation and estimate, for
86
+ ∗Although the standard deviations of the planetary trajectories are not quoted in the NASA public website,
87
+ however one can consider that the corresponding error equals to the last digit of the quoted significant numbers.
88
+ 2
89
+
90
+ Yukawa corrected potential, the Yukawa strength α. One can use this estimated value, or another
91
+ nominal value taken from other studies, to either draw the analytical trajectory and recalculate
92
+ the characterizing parameters: the shortest (longest) distance to the Sun rmin(rmax), the semi
93
+ major (minor) axis a(b) and the eccentricity e, or to solve numerically the equations of motion
94
+ with the Yukawa-corrected potential in order to check the closedness of the resulting trajectory,
95
+ whose characteristics are to be reevaluated again. Later, we compared these results with those
96
+ calculated for the Keplerian motion of planets subject to the pure Newtonian potential, and, in
97
+ addition, showed the compatibility of the results with the observational NASA data.
98
+ More specifically, for the two-body system (planet-sun), the Newtonian potential is given by:
99
+ VN(r) = −G mp M⊙ / r    (1)
+ where G = 6.674 × 10^−11 N m²/kg² is the gravitational Newton constant, mp (M⊙) is the planet (sun)
+ mass. With a Yukawa correction, the gravitational potential becomes
+ V (r) = −(G mp M⊙ / r) (1 + α e^(−r/λ)) = VN(r) + VY k(r)    (2)
113
+ where VY k is the Yukawa correction to the Newtonian potential and α (λ) represents the
114
+ strength (range) of the Yukawa correction. Previous studies [23, 25] gave the nominal values
115
+ (α = 10^−8, λ = 10^3 AU = 10^15 m). However, our estimations gave a larger order of magnitude for
116
+ the Yukawa strength: α ∼ 10−4 − 10−5 for terrestrial planets (Mercury, Venus, Earth, Mars and
117
+ Pluto) and α ∼ 10−3 for the remaining Giant planets (Jupiter, Saturn, Uranus and Neptune),
118
+ which are in line with [13].
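To make the size of the correction concrete, the following minimal Python sketch evaluates Eqs. (1)-(2); the solar and planetary constants and the sample values of α and λ are illustrative assumptions, not the fitted values discussed in section (4).

```python
import math

G = 6.674e-11        # N m^2 / kg^2, Newton's gravitational constant
M_SUN = 1.989e30     # kg (illustrative value)
M_PLANET = 5.972e24  # kg, Earth-like planet (illustrative value)
LAMBDA = 1e15        # m, Yukawa range (nominal value quoted in the text)

def v_newton(r, m_p=M_PLANET, m_star=M_SUN):
    """Pure Newtonian potential V_N(r) of Eq. (1)."""
    return -G * m_p * m_star / r

def v_yukawa_corrected(r, alpha, lam=LAMBDA, m_p=M_PLANET, m_star=M_SUN):
    """Yukawa-corrected potential V(r) of Eq. (2)."""
    return -G * m_p * m_star / r * (1.0 + alpha * math.exp(-r / lam))

r = 1.496e11  # m, roughly 1 AU
for alpha in (1e-8, 1e-4):  # nominal vs. estimated strength
    rel = abs(v_yukawa_corrected(r, alpha) / v_newton(r) - 1.0)
    print(f"alpha = {alpha:g}: relative correction to V(r) ~ {rel:.2e}")
```

Since r ≪ λ for solar-system orbits, the exponential is essentially 1 and the printed relative correction is close to α itself.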
119
+ We saw that for estimated α, the maximum deviation from observed data, which increases
120
+ the further the planet is (20 million km in Pluto), is less than that of the α nominal value
121
+ (80 million km in Neptune), which is plausible considering that the estimation of α is done by
122
+ identifying the factor containing it to observational data.
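One simple way such an identification can be set up (a sketch of the idea only, not necessarily the paper's exact fitting procedure) is to assume, as derived in section 2 below, that the orbit keeps the Keplerian form with K replaced by K(1 + α), so that the semi-latus rectum rmin(1 + e) equals ℓ²/(µK(1 + α)) and α can be read off from the observed (rmin, vmax, e):

```python
def estimate_alpha(r_min, v_max, e, m_p, m_star, G=6.674e-11):
    """Illustrative estimate of the Yukawa strength alpha.

    Assumes a Keplerian orbit with K -> K(1 + alpha), so that the
    semi-latus rectum r_min*(1 + e) equals l^2 / (mu*K*(1 + alpha)).
    This is a sketch, not necessarily the paper's exact procedure.
    """
    mu = m_p * m_star / (m_p + m_star)   # reduced mass
    K = G * m_p * m_star
    l = mu * r_min * v_max               # angular momentum at perihelion
    p_obs = r_min * (1.0 + e)            # observed semi-latus rectum
    return l**2 / (mu * K * p_obs) - 1.0

# Rough Earth-like inputs (assumed for illustration, not NASA table values):
print(estimate_alpha(r_min=1.471e11, v_max=3.029e4, e=0.0167,
                     m_p=5.972e24, m_star=1.989e30))
```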
123
+ For each of the nominal and estimated values of α, we analysed the planet’s trajectory
124
+ both analytically and numerically. Analytics wise, we started from the observational data of
125
+ NASA (rmin, vmax, e) and reconstructed the closed ellipse trajectory of which we re-evaluated
126
+ the characteristics (rmin, rmax, a, b, e) and compared with the pure Newton case and with the
127
+ observational data. Numerics wise, the α determines the potential under which the planet moves,
128
+ and so one can solve the equations of motion numerically using Runge-Kutta method taking as
129
+ initial conditions the observed data of (rmin, vmax), to check that one gets closed trajectories in
130
+ excellent agreement with the elliptical shapes, of which we can evaluate the characteristics that
131
+ one compares to the pure Newtonian case, to the analytical method results and to the observed
132
+ data.
133
+ The manuscript is organized as follows.
134
+ In section (1), we revise the system dynamics
135
+ using Hamilton’s method. In section(2), we state the types of stability and determine the one
136
+ corresponding to the system under study. We discuss, in section(3) and following [23], Bertrand’s
137
+ theorem and get the analytical solution to the equation of motion. Finally, we apply in section
138
+ (4) the obtained approximative analytical results to the study of the solar system planets in
139
+ order to estimate the Yukawa strength and re-determine the trajectory characteristics for both
140
+ estimated and nominal values of α, as well as solve numerically the equations. The results,
141
+ of comparing the analytical/numerical outputs with the observed data according to the used
142
+ In our computations, we used the whole digits allowed by machine precision, however the results in the appendices
143
+ tables showed only significant digits equal to those of the observed data.
144
+ 3
145
+
146
+ potential, are presented in form of plots for all the planets, whereas the corresponding tables
147
+ are given in an appendix. We end up with conclusions in section (5).
148
+ 1
149
+ Hamiltonian formulation
150
+ We start with the Hamiltonian H = T + V where T is the kinetic energy of both masses and V
151
+ is the Gravitational potential energy.
152
+ H = ⃗p1²/(2mp) + ⃗p2²/(2M⊙) − (K/|⃗r2 − ⃗r1|) (1 + α e^(−|⃗r2 − ⃗r1|/λ))    (3)
168
+ where ⃗ri, (⃗vi), i = 1, 2 are the positions (velocities) of the two masses with corresponding mo-
169
+ menta p1 = M⊙v1, p2 = mpv2, K = GmpM⊙. Changing to the center of mass frame (c.o.m),
170
+ with
+ ⃗r1 = + [mp/(mp + M⊙)] ⃗r = + (µ/M⊙) ⃗r + ⃗R ,   ⃗r2 = − [M⊙/(mp + M⊙)] ⃗r = − (µ/mp) ⃗r + ⃗R    (4)
+ ⃗r = ⃗r1 − ⃗r2 ,   ⃗R = (M⊙ ⃗r1 + mp ⃗r2)/(mp + M⊙)    (5)
+ ⃗v1 = ˙⃗R + (µ/M⊙) ⃗v ,   ⃗v2 = ˙⃗R − (µ/mp) ⃗v    (6)
+ ⃗v = ˙⃗r ,   ⃗p = µ⃗v    (7)
+ ¨⃗R = ⃗0 ,   µ¨⃗r = M⊙ ¨⃗r1 = −mp ¨⃗r2    (8)
207
+ we get
208
+ H = (1/2)(M⊙ + mp) ˙⃗R² + H :   H = p²/(2µ) − (K/r) (1 + α e^(−r/λ))    (9)
+ Here we have defined µ = mp M⊙/(mp + M⊙) as the reduced mass of the system and r = |⃗r|. We switch to
+ polar coordinates in the c.o.m to get
+ H = (1/2µ) (pr² + pϕ²/r²) − (K/r) (1 + α e^(−r/λ))    (10)
238
+ From the canonical equations ( [26]): ˙qi = ∂H/∂pi ,  ˙pi = −∂H/∂qi , and since the Hamiltonian is
+ cyclic in ϕ (i.e. it does not depend explicitly on ϕ), we have:
+ ˙ϕ = ∂H/∂pϕ = pϕ/(µr²)    (11)
+ ˙pϕ = −∂H/∂ϕ = 0 ⇒ pϕ = µr² ˙ϕ = ℓ = constant    (12)
258
+ where ℓ is the angular momentum of the two-body system, and therefore Hamilton’s equations
+ for r become:
+ ˙r = ∂H/∂pr = pr/µ    (13)
+ ˙pr = −∂H/∂r = ℓ²/(µr³) − (K/r²) (1 + α (1 + r/λ) e^(−r/λ))    (14)
280
+
281
+ Figure 1:
282
+ The reduced potential (red line) given for fixed angular momentum (Eq. 16). The
283
+ pink line denotes the magnitude of the purely Yukawa term |−(αK/r) e^(−r/λ)|, whereas the blue line
288
+ represents the Keplerian reduced potential, i.e. Eq. 16 without the Yukawa term.
289
+ Again, and since H(t) = H(t0) = h is constant during the motion of the masses [26], and since
290
+ pr² = µ² ˙r² ≥ 0 we get a lower bound for the total energy of the system:
+ h ≥ ℓ²/(2µr²) − (K/r) (1 + α e^(−r/λ))    (15)
301
+ The right hand side of eq. (15) is defined to be the “reduced potential”, which is common in the
302
+ Kepler problem moving from two degrees of freedom to only one (with the Yukawa correction)
303
+ Vred(r) = ℓ²/(2µr²) − (K/r) (1 + α e^(−r/λ))    (16)
312
+ One can draw the function for fixed ℓ giving the allowed regions of motion (look at figure 1).
313
+ Note that µ > 0, λ > 0 and α > 0.
314
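+ As an illustration of how Eq. (16) can be evaluated (the authors' computations were done in Matlab;
+ the sketch below is an independent Python rendering), one may plot the reduced potential for a fixed ℓ.
+ The parameter values are rough Earth-like placeholders, not the paper's fitted values, and ℓ is taken
+ here as µ rmin vmax, the angular momentum of the relative motion.
+ import numpy as np
+ import matplotlib.pyplot as plt
+
+ # Assumed, roughly Earth-like parameters (SI units) -- illustrative only
+ G = 6.674e-11
+ M_sun, m_p = 1.9885e30, 5.9722e24
+ mu = m_p * M_sun / (m_p + M_sun)          # reduced mass
+ K = G * m_p * M_sun
+ alpha, lam = 1e-8, 1e15                   # Yukawa strength and range
+ r_min, v_max = 1.47095e11, 3.029e4        # perihelion and perihelion speed
+ ell = mu * r_min * v_max                  # angular momentum of the relative motion
+
+ r = np.linspace(0.2 * r_min, 5.0 * r_min, 2000)
+ V_kep = ell**2 / (2 * mu * r**2) - K / r              # Keplerian reduced potential
+ V_yuk = -(alpha * K / r) * np.exp(-r / lam)           # Yukawa term
+ V_red = V_kep + V_yuk                                 # Eq. (16)
+
+ plt.plot(r, V_red, 'r-', label='V_red')
+ plt.plot(r, V_kep, 'b--', label='V_kep')
+ plt.plot(r, np.abs(V_yuk), 'm:', label='|V_yuk|')
+ plt.xlabel('r (m)'); plt.ylabel('V (J)'); plt.legend(); plt.show()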
+ 2 The linearization matrix
316
+ Following [27], in order to determine the stability of the equilibrium points of the system, we
+ must form a matrix differential equation using the system's equations of motion (Hamilton's Eqs.
+ 13 and 14 for r, pr). The linear system has the form:
+ d/dt (r, pr)ᵀ = (f(r, pr), g(r, pr))ᵀ = (f0, g0)ᵀ|eq + [ ∂f/∂r  ∂f/∂pr ; ∂g/∂r  ∂g/∂pr ] (r, pr)ᵀ
+ where f(r, pr) = pr/µ ,  g(r, pr) = ℓ²/(µr³) − (K/r²) [1 + α (1 + r/λ) e^(−r/λ)]   (17)
364
+ Given that λ = 10^15 m for orbits of size comparable to the solar system dimensions [28], one can
+ assume that r/λ is small enough that one can Taylor expand the exponential and ignore terms of
+ O(r²/λ²), leading to:
+ e^(−r/λ) ≈ 1 − r/λ + O(r²/λ²) ≈ 1 − r/λ   (18)
395
+ Thus
+ g(r, pr) = ℓ²/(µr³) − (K/r²) [1 + α (1 + r/λ)(1 − r/λ)] ≈ ℓ²/(µr³) − (K/r²)(1 + α),
+ with the Yukawa effect within this approximation being limited to replacing K by K(1 + α). This
+ means that the potential shape is still Newtonian (1/r), and according to Bertrand's theorem every
+ bound trajectory is thus closed for small r/λ. One can see this fact directly from Eq. (16), which,
+ compared to the Keplerian potential, reduces within the approximation to a constant shift in
+ addition to the replacement K → K(1 + α), neither of which alters the equations of motion:
+ Vred(r) ≈ ℓ²/(2µr²) − (K/r)(1 + α) + Kα/λ .   (19)
423
+ Consequently, the Jacobian matrix takes the form:
+ ( ˙r , ˙pr )ᵀ = [ 0 , 1/µ ; −3ℓ²/(µr⁴) + (2K/r³)(1 + α) , 0 ] (r, pr)ᵀ   (20)
+ where terms of order O(r²/λ²) were ignored, and where the equilibrium point (r, pr)eq satisfies
+ f(r, pr)|eq = g(r, pr)|eq = 0. We can determine the equilibrium radius using Eq. (14) to get, up to
+ leading order:
+ req = ℓ²/(µK(1 + α))   (21)
454
+ We can now test for stability by choosing values of (α, µ, K, ℓ, λ) and finding the eigenvalues
+ of the Jacobian matrix (20) after substituting the equilibrium solution found above (Eq. 21).
+ Recall that the eigenvalues β1, β2 are found by solving the following equation:
+ det |J − β I2×2| = 0   (22)
+ with I2×2 referring to the 2 × 2 identity matrix. Thus we have
+ det [ −β , 1/µ ; −3ℓ²/(µr⁴) + (2K/r³)(1 + α) , −β ] = 0   (23)
470
+ The characteristic equation (the eigenvalue equation) becomes:
+ β1,2 = ½ ( τ ± √(τ² − 4∆) )   (24)
+ τ = trace(J) = 0   (25)
+ ∆ = det(J) = µ²K⁴(1 + α)⁴/ℓ⁶   (26)
484
+ Following [29], the stability is determined by the sign of the eigenvalues. Since ∆ > 0, we have
485
+ the following cases:
486
+ • τ < 0, τ 2 − 4∆ > 0 ⇒ (r0, pr0) a stable node.
487
+ • τ < 0, τ 2 − 4∆ < 0 ⇒ (r0, pr0) a stable spiral.
488
490
+ • τ > 0, τ 2 − 4∆ > 0 ⇒ (r0, pr0) an unstable node.
491
+ • τ > 0, τ 2 − 4∆ < 0 ⇒ (r0, pr0) an unstable spiral.
492
+ • τ = 0, τ 2 − 4∆ < 0 ⇒ (r0, pr0) a neutrally stable center (which is our case).
493
+ Here, stability refers to how the solution behaves near the equilibrium point: unstable solutions
+ grow without bound, whereas stable solutions decay towards the equilibrium. It is the cases with
+ imaginary eigenvalues that give bound orbital solutions (specifically the center case; the stable and
+ unstable spirals are bound solutions tending towards or away from the equilibrium).
497
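+ A minimal numerical sketch of this classification (not the authors' Matlab code): evaluate τ, ∆ and
+ the eigenvalues of the Jacobian (20) at the equilibrium radius (21). The parameter values below are
+ assumed Earth-like placeholders, with ℓ again taken as µ rmin vmax.
+ import numpy as np
+
+ # Assumed Earth-like parameters (SI units) -- illustrative only
+ G = 6.674e-11
+ M_sun, m_p = 1.9885e30, 5.9722e24
+ mu = m_p * M_sun / (m_p + M_sun)
+ K = G * m_p * M_sun
+ alpha = 1e-8
+ ell = mu * 1.47095e11 * 3.029e4            # angular momentum of the relative motion
+
+ r_eq = ell**2 / (mu * K * (1 + alpha))     # Eq. (21)
+
+ # Jacobian of (f, g), Eq. (20), in the small-r/lambda approximation
+ J = np.array([[0.0, 1.0 / mu],
+               [-3 * ell**2 / (mu * r_eq**4) + 2 * K * (1 + alpha) / r_eq**3, 0.0]])
+
+ tau = np.trace(J)                          # Eq. (25): zero
+ Delta = np.linalg.det(J)                   # Eq. (26): positive
+ beta = np.linalg.eigvals(J)                # purely imaginary pair, cf. Eq. (28)
+
+ if np.isclose(tau, 0.0) and Delta > 0:
+     kind = "neutrally stable center"
+ elif tau < 0:
+     kind = "stable node" if tau**2 - 4 * Delta > 0 else "stable spiral"
+ else:
+     kind = "unstable node" if tau**2 - 4 * Delta > 0 else "unstable spiral"
+ print(r_eq, tau, Delta, beta, kind)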
+ 3 Stability & Bertrand's theorem
499
+ First, we rewrite the eigenvalue equation in the form
+ β² + µ²K⁴(1 + α)⁴/ℓ⁶ = 0   (27)
+ leading to:
+ β = ± i µK²(1 + α)²/ℓ³   (28)
508
+ We note that one can study the case of a purely Newtonian potential by letting α → 0.
+ Similarly, by ignoring the terms derived from the Newtonian potential, one can single out the
+ pure Yukawa contribution. In these two extreme cases, the characteristic equation becomes
+ Pure Newtonian: β² + µ²K⁴/ℓ⁶ = 0   (29)
+ Pure Yukawa: β² + µ²K⁴α⁴/ℓ⁶ = 0   (30)
+ giving
+ Pure Newtonian: β = ± i µK²/ℓ³   (31)
+ Pure Yukawa: β = ± i µK²α²/ℓ³   (32)
526
+ Thus, the equilibrium points for the purely Newtonian, the purely Yukawa, and the Newton
527
+ plus Yukawa Potentials remain center solutions. This implies that the motion would remain
528
+ restricted to ellipses about the equilibrium point; and so, orbits near the equilibrium point are
529
+ possible (further away from the equilibrium point one would have unbounded solutions, as Fig.
530
+ 1 shows). This proves that for small r/λ we have stable, closed orbits.
531
+ The Keplerian orbit equation can be written as:
+ d²u/dϕ² + u = −(µ/ℓ²) (d/du) V(1/u)   (33)
+ where u = 1/r denotes the Binet transformation, giving, for small r/λ, the following differential
+ equation:
+ d²u/dϕ² + u = + (µK/ℓ²)(1 + α)   (34)
+ whose solution is given by
+ u(ϕ) = 1/r = A [1 + e cos(ϕ − ϕ0)] ,  with  A = (µK/ℓ²)(1 + α)   (35)
552
554
+ where e is the eccentricity of the orbit. The purely Newtonian and purely Yukawa cases follow
+ respectively from (34):
+ Newtonian: u(ϕ) = 1/r = (µK/ℓ²) [1 + e cos(ϕ − ϕ0)]   (36)
+ Purely Yukawa: u(ϕ) = 1/r = (µKα/ℓ²) [1 + e cos(ϕ − ϕ0)]   (37)
565
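+ As a quick numerical check of the closed orbit of Eq. (35), one can tabulate r(ϕ) over a full
+ revolution and read off the perihelion, aphelion and eccentricity. A minimal Python sketch, with
+ assumed Earth-like values for (µ, K, ℓ, α, e) and ℓ = µ rmin vmax:
+ import numpy as np
+
+ # Assumed Earth-like inputs (SI units) -- illustrative only
+ G = 6.674e-11
+ M_sun, m_p = 1.9885e30, 5.9722e24
+ mu = m_p * M_sun / (m_p + M_sun)
+ K = G * m_p * M_sun
+ alpha, e = 1e-8, 0.01671
+ ell = mu * 1.47095e11 * 3.029e4            # angular momentum of the relative motion
+
+ A = mu * K * (1 + alpha) / ell**2          # Eq. (35)
+ phi = np.linspace(0.0, 2.0 * np.pi, 100001)
+ r = 1.0 / (A * (1.0 + e * np.cos(phi)))    # orbit equation with phi0 = 0
+
+ r_min, r_max = r.min(), r.max()            # perihelion and aphelion
+ a = 0.5 * (r_min + r_max)                  # semi-major axis
+ ecc = (r_max - r_min) / (r_max + r_min)    # recovered eccentricity (~ e)
+ print(r_min, r_max, a, ecc)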
+ Finally, in order to satisfy Bertrand's theorem, the following condition should be satisfied:
+ d²Vred(r)/dr² |_(r=r0) > 0   (38)
+ where the reduced potential is given by (16). With the approximation of Eq. (18) and ignoring
+ terms of order O(r²/λ²), this condition becomes
+ d²Vred(r)/dr² |_(r=r0) = µ³K⁴(1 + α)⁴/ℓ⁶ > 0   (39)
+ which is true, since α, µ, K, ℓ > 0, both in general and in the special cases of the Newtonian (α = 0)
+ and purely Yukawa potentials. This shows that the Yukawa plus Newtonian potential satisfies
+ Bertrand's theorem for small r/λ.
590
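+ The leading-order results above (Eqs. 21, 26 and 38-39) can be checked symbolically; the following
+ sympy sketch (an independent check, not from the paper) derives the equilibrium radius, the
+ determinant of the Jacobian, and the second derivative of the reduced potential, assuming the
+ approximated potential of Eq. (19):
+ import sympy as sp
+
+ r, mu, K, alpha, ell, lam = sp.symbols('r mu K alpha ell lam', positive=True)
+
+ # Reduced potential in the small-r/lambda approximation, Eq. (19)
+ V_red = ell**2 / (2 * mu * r**2) - K * (1 + alpha) / r + K * alpha / lam
+
+ # Equilibrium radius from dV_red/dr = 0 -- reproduces Eq. (21)
+ r_eq = sp.solve(sp.diff(V_red, r), r)[0]
+ print(sp.simplify(r_eq - ell**2 / (mu * K * (1 + alpha))))           # -> 0
+
+ # Jacobian of Hamilton's equations, Eq. (20): note dg/dr = -V_red''
+ J = sp.Matrix([[0, 1 / mu], [-sp.diff(V_red, r, 2), 0]])
+ Delta = sp.simplify(J.det().subs(r, r_eq))
+ print(sp.simplify(Delta - mu**2 * K**4 * (1 + alpha)**4 / ell**6))   # -> 0, Eq. (26)
+
+ # Bertrand condition, Eq. (38): V_red''(r_eq) comes out positive
+ print(sp.simplify(sp.diff(V_red, r, 2).subs(r, r_eq)))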
+ 4 Application to the solar system
592
+ We present here our results, which consist of first determining the parameters of the models (rmin, rmax, a, b, e)
+ by comparing the previous approximate analytical solutions with the NASA data. Then, we
+ solved the equations of motion numerically, using Matlab and the fourth-order Runge-Kutta
+ method without any approximation, in order to compare with the analytical solutions and with
+ the observed NASA data. We applied this to all the planets of the solar system. For each
+ (sun-planet) pair we used the following values: M⊙ = 1.9885 × 10^30 kg, αnominal = 10^(−8), λ = 10^15 m.
+ We list in Table (1) the initial conditions used in the analytical and numerical calculations (the
+ period τ is used only in the numerical solution to determine the corresponding ‘step’):
600
+ Planet      mp (×10^24 kg)   τ (days)    rmin (×10^9 km)   vmax (×10^3 m/s)   eccentricity
+ MERCURY     0.3302           87.969      0.046             58.98              0.20563
+ VENUS       4.8673           224.701     0.10748           35.26              0.00677
+ EARTH       5.9722           365.256     0.147095          30.29              0.01671
+ MARS        0.64169          686.98      0.20665           26.5               0.09341
+ JUPITER     1898.13          4332.589    0.740595          13.72              0.04839
+ SATURN      568.32           10832.33    1.357554          10.18              0.05415
+ URANUS      86.811           30685.4     2.732696          7.11               0.04717
+ NEPTUNE     102.409          60189       4.47105           5.5                0.00859
+ PLUTO       0.01303          90560       4.434987          6.1                0.24881
+ Table 1: Initial conditions used in the calculations, where mp denotes the planet mass, τ the
+ orbit period, rmin the perihelion and vmax the perihelion velocity.
662
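+ For convenience, the entries of Table 1 can also be collected in a small Python structure (a direct
+ transcription of the table, suitable for quick scripts such as the illustrative sketches in this section):
+ # (mp [1e24 kg], period [days], r_min [1e9 km], v_max [km/s], eccentricity)
+ PLANETS = {
+     "Mercury": (0.3302,  87.969,   0.046,    58.98, 0.20563),
+     "Venus":   (4.8673,  224.701,  0.10748,  35.26, 0.00677),
+     "Earth":   (5.9722,  365.256,  0.147095, 30.29, 0.01671),
+     "Mars":    (0.64169, 686.98,   0.20665,  26.5,  0.09341),
+     "Jupiter": (1898.13, 4332.589, 0.740595, 13.72, 0.04839),
+     "Saturn":  (568.32,  10832.33, 1.357554, 10.18, 0.05415),
+     "Uranus":  (86.811,  30685.4,  2.732696, 7.11,  0.04717),
+     "Neptune": (102.409, 60189,    4.47105,  5.5,   0.00859),
+     "Pluto":   (0.01303, 90560,    4.434987, 6.1,   0.24881),
+ }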
+ 4.1 Analytical Method
664
+ The analytical ellipse equation is of the form
+ 1/r ≡ u = (a/b²) (1 + e cos ϕ) ,   (40)
+ where (for a y-axis perpendicular to the polar axis in the orbit plane)
+ rmin = a(1 − e) ,  rmax = a(1 + e),   (41)
+ e = c/a = √(1 − b²/a²) ,  with  c² = a² − b²,   (42)
+ a = (rmin + rmax)/2 ,  b = (ymax − ymin)/2   (43)
+ Thus, analytically one can start with (rmin, vmax, e) observed by NASA in [24] to compute†:
+ a = rmin/(1 − e) ,  b = a √(1 − e²),   (44)
+ and estimate the strength α from
+ (µK/ℓ²)(1 + α) = a/b²  using  ℓ = rmin vmax.   (45)
+ Once the analytical equation is determined, one can plot the trajectory and recompute the
+ characteristics (rmin, rmax, a, b, e) using Eqs. (41,42). We call this procedure the “analytical-α-
+ estimated” approach.
+ One can also use the nominal value α = 10^(−8) and plug it into Eq. (40), where ℓ, e are taken
+ from the observed data, to re-evaluate (rmin, rmax, a, b, e) from
+ a = 1/(A(1 − e²)) ,  A = µK(1 + α)/ℓ² ,  b = 1/(A √(1 − e²)) .   (46)
+ We call this procedure the “analytical-α-nominal” approach, which can be viewed as a method
+ with three inputs (α, ℓ, e) instead of the three inputs (rmin, vmax, e) used in the other approach.
726
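+ A minimal Python sketch of the “analytical-α-estimated” step for a single planet (Mercury, values
+ from Table 1). Here the angular momentum of Eq. (45) is interpreted per unit reduced mass,
+ h = rmin·vmax, so that Eq. (45) reads (1 + α) = h²/[G(M⊙ + mp) b²/a]; this rewriting, and the value
+ of G used, are assumptions of the sketch.
+ import math
+
+ G = 6.674e-11                               # assumed gravitational constant (SI)
+ M_sun = 1.9885e30
+ m_p, r_min, v_max, e = 0.3302e24, 0.046e12, 58.98e3, 0.20563   # Mercury, Table 1 (SI)
+
+ a = r_min / (1.0 - e)                       # Eq. (44)
+ b = a * math.sqrt(1.0 - e**2)
+ p = b**2 / a                                # semi-latus rectum a(1 - e^2)
+
+ h = r_min * v_max                           # specific angular momentum
+ alpha = h**2 / (G * (M_sun + m_p) * p) - 1.0   # Eq. (45) rewritten with h
+ print(a, b, alpha)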
+ 4.2 Numerical Method
728
+ Here, we simply solve numerically, using the fourth-order Runge-Kutta method, Newton's equation
+ of motion in the c.o.m. frame with initial conditions taken from NASA. Thus we solve the equations:
+ ¨⃗r1 = Gmp (⃗r2 − ⃗r1)/r³ ,  ¨⃗r2 = GM⊙ (⃗r1 − ⃗r2)/r³ = −(M⊙/mp) ¨⃗r1 ,   (Newton)   (47)
+ ¨⃗r1 = Gmp [ (1 + α e^(−r/λ)) (1/r) + (α/λ) e^(−r/λ) ] (⃗r2 − ⃗r1)/r² ,  ¨⃗r2 = −(M⊙/mp) ¨⃗r1 ,   (Newton+Yukawa)   (48)
+ under the initial conditions given by the NASA data of (rmin, vmax):
+ ⃗r1(t = tmin) = + (mp/(mp + M⊙)) ⃗rmin ,  ⃗v1(t = tmin) = + (mp/(mp + M⊙)) ⃗vmax,   (49)
+ ⃗r2(t = tmin) = − (M⊙/(mp + M⊙)) ⃗rmin ,  ⃗v2(t = tmin) = − (M⊙/(mp + M⊙)) ⃗vmax.   (50)
777
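+ The authors used Matlab for this step; the sketch below is an independent Python illustration of a
+ fixed-step fourth-order Runge-Kutta integration of Eqs. (47)-(50) for one planet (Earth, data from
+ Table 1). The step size (one period divided into 20000 steps) and the value of G are assumptions of
+ the sketch.
+ import numpy as np
+
+ G = 6.674e-11
+ M_sun, m_p = 1.9885e30, 5.9722e24            # sun and Earth masses (kg)
+ alpha, lam = 1e-8, 1e15                      # nominal Yukawa parameters
+ r_min, v_max = 0.147095e12, 30.29e3          # perihelion (m) and perihelion speed (m/s)
+ period = 365.256 * 86400.0                   # orbital period (s)
+
+ def deriv(y):
+     """Time derivative of y = [r1, v1, r2, v2] (2D vectors), Eq. (48)."""
+     r1, v1, r2, v2 = y.reshape(4, 2)
+     d = r2 - r1
+     r = np.linalg.norm(d)
+     f = (1 + alpha * np.exp(-r / lam)) / r + (alpha / lam) * np.exp(-r / lam)
+     a1 = G * m_p * f * d / r**2              # acceleration of body 1 (sun)
+     a2 = -(M_sun / m_p) * a1                 # acceleration of body 2 (planet)
+     return np.concatenate([v1, a1, v2, a2])
+
+ def rk4_step(y, dt):
+     k1 = deriv(y)
+     k2 = deriv(y + 0.5 * dt * k1)
+     k3 = deriv(y + 0.5 * dt * k2)
+     k4 = deriv(y + dt * k3)
+     return y + dt * (k1 + 2 * k2 + 2 * k3 + k4) / 6.0
+
+ # Initial conditions, Eqs. (49)-(50): perihelion along x, perihelion velocity along y
+ w1, w2 = m_p / (m_p + M_sun), M_sun / (m_p + M_sun)
+ y = np.array([ w1 * r_min, 0.0, 0.0,  w1 * v_max,     # body 1 (sun)
+               -w2 * r_min, 0.0, 0.0, -w2 * v_max])    # body 2 (planet)
+
+ dt, rel = period / 20000.0, []
+ for _ in range(20000):
+     y = rk4_step(y, dt)
+     rel.append(y[4:6] - y[0:2])                        # relative vector r2 - r1
+
+ dist = np.linalg.norm(rel, axis=1)
+ print(dist.min(), dist.max())                          # ~ rmin and rmax of the orbit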
+ Once the trajectory is solved numerically, we check that it is closed, as the Fig.
778
+ (2) shows
779
+ for both the pure Newton and that with the Yukawa corrections (since the differences are not
780
+ visible on the figure scale containing all the planets). For each obtained orbit, we recalculate
781
+ the corresponding characteristics (rmin, rmax, a, b, e).
782
+ †Due to measurement errors and orbits not being perfectly elliptical, the NASA data may give slightly different
783
+ values of a using Eq. 43 or Eq. 44.
784
786
+ Figure 2: Closed bound planets’ trajectories with and without Yukawa corrections with strength
787
+ α nominal.
788
+ 4.3 Results
790
+ We report in the Tables of Appendix A (from A1 to A18), the calculated characteristics of the
791
+ resulting trajectories for all the planets in the solar system, corresponding to the pure Newton
792
+ and the Newton corrected with Yukawa potentials, both in the analytical and the numerical
793
+ approaches. The odd (even) numbered tables correspond to the nominal (estimated) Yukawa
794
+ strength α. The number of moons of each planet is determined according to [30]. Below we
795
+ explain the meanings of the symbols used in the tables.
796
+ • Nnum: Numerical calculations using the Newtonian potential.
+ • Nanal: Analytical calculations using the Newtonian potential.
+ • R^N = Nnum/Nanal %: The percentage ratio of the numerical to the analytical results for the Newtonian potential.
+ • (N + Y K)num: Numerical calculations using the modified potential.
+ • (N + Y K)anal: Analytical calculations using the modified potential.
+ • R^(N+Y K) = (N + Y K)num/(N + Y K)anal %: The percentage ratio of the numerical to the analytical results for the modified potential.
+ • R^(N−Obs)_num = Nnum/Obs %: Percentage ratio of the numerical results, using the Newtonian potential, to the observed results.
+ • R^(N−Obs)_anal = Nanal/Obs %: Percentage ratio of the analytical results, using the Newtonian potential, to the observed results.
+ • R^(Y K−Obs)_num = (N + Y K)num/Obs %: Percentage ratio of the numerical results, using the modified potential, to the observed results.
+ • R^(Y K−Obs)_anal = (N + Y K)anal/Obs %: Percentage ratio of the analytical results, using the modified potential, to the observed results.
824
+ In order to summarize the findings of the Tables, we present in Fig. (3) plots showing, for each
+ planet and at every polar angle, the deviation from unity of the ratio between two of the following
+ quantities, thus allowing a comparison of the effects of the considered potential (Newton vs
+ Newton+Yukawa) and/or the used method (numerical vs analytical) and/or the Yukawa strength
+ determination (nominal vs estimated):
829
+ • rn(num) representing the trajectory equation of the numerical approach with Newton
830
+ potential,
831
+ • rn(anl) representing the trajectory equation of the analytical approach with Newton po-
832
+ tential,
833
+ • ryk(num) representing the trajectory equation of the numerical approach with New-
834
+ ton+Yukawa potential and nominal α,
835
+ • ryk(anl) representing the trajectory equation of the analytical approach with Newton+Yukawa
836
+ potential and nominal α,
837
+ • ryka(num) representing the trajectory equation of the numerical approach with New-
838
+ ton+Yukawa potential and estimated α,
839
+ • ryka(anl) representing the trajectory equation of the analytical approach with New-
840
+ ton+Yukawa potential and estimated α.
841
+ We see that some ratios (e.g. the dashed red and sky blue) do coincide near zero deviation
+ from one, meaning no tangible effect of adding the Yukawa correction, be it in the analytic or
+ the numeric method, as long as one takes the nominal value of α. Also, we note local extrema
+ of the deviations from unity at polar angles that are multiples of π/2, as a generic feature in many plots.
+ One can interpret the large values of the deviations for the nearest (Mercury) and the farthest
+ (Pluto) planet as follows: for the former, the perturbative effect of solar winds, important as we
+ approach the sun, was not taken into consideration, whereas for the latter, the accumulating
+ gravitational screening effects of the other planets and their moons, which were not considered
+ in the study, become important, especially for a small-sized planetoid like Pluto.
850
+ In order to show the effect of the separating distance, one should compute the absolute
+ deviations from the observed data for each planet. In appendix B, Tables B1, B2 (B3, B4) report
+ the deviation from observation for each planet of rmax, rmin respectively, in the case of nominal
+ (estimated) α. We summarize these findings in Fig. (4). We see that the agreement between
+ the numerical and analytical solutions is excellent in both the estimated and nominal α cases. We
+ also see that the deviations due to the Yukawa correction are not large, but note the following:
+ 1. For estimated α:
+ • rmax-deviation: The numerical deviation is about 10³ times larger than the analytical
+ deviation. In general, it increases with the planet's distance, and reaches a maximum
+ of order −25 million km (less than the observed value) for Pluto.
+ • rmin-deviation: Again, the numerical deviation is larger than the analytical one, by about
+ one to two orders of magnitude; it is largest for Neptune (−8 million km), but it reverses
+ sign and becomes +5 million km above the observation for Pluto.
864
+ [Figure 3 legend: solid curves 1−{rn(num)/ryka(anl)}, 1−{ryka(anl)/ryk(num)}, 1−{ryka(anl)/ryka(num)},
+ 1−{rn(anl)/ryka(anl)}, 1−{ryka(anl)/ryk(anl)}; dashed curves 1−{rn(num)/ryk(num)}, 1−{ryka(num)/rn(num)},
+ 1−{ryk(num)/ryka(num)}, 1−{ryk(anl)/rn(anl)}.]
886
+ Figure 3: Deviations from unity of the ratios of computed trajectories at each polar angle,
+ according to the considered potential (Newton vs Newton+Yukawa) and/or to the used method
+ (numerical vs analytical) and/or to the Yukawa strength determination (nominal vs estimated).
+ We show in a zoomed region, for one generic case (Earth), that the dashed red and sky
+ blue curves are very near each other (the same applies to the green and blue curves in the Mercury
+ case).
893
+ [Figure 3 panels: for each planet (Mercury, Venus, Earth, Mars, Jupiter, Saturn, Uranus, Neptune,
+ Pluto), the percentage deviations from unity of the trajectory ratios are plotted versus the polar
+ angle (deg).]
+ [Figure 4 panels: bar charts of the absolute deviations (×10^6 km) from the observed rmax and rmin,
+ obtained numerically and analytically, for nominal and estimated α.]
1137
+ Figure 4: Absolute deviations from observed data for each planet, according to the used method
1138
+ (numerical vs analytical) and/or to the Yukawa strength determination (nominal vs estimated).
1139
1141
+ 2. For nominal α:
1142
+ • rmax-deviation: The numerical deviation is larger than the analytical one, but both are of
+ the same order, reaching a maximum of +80 (+40) million km using the numerical
+ (analytical) method for Neptune. For Pluto and Uranus, we get about −40 million km in
+ the numerical method (less than observed).
+ • rmin-deviation: The analytical deviation is larger, and sometimes reverses sign compared
+ to the numerical one. For example, for Neptune the analytical approach gives a
+ deviation of +40 million km from observation, whereas the numerical one gives a
+ deviation of −5 million km (less than the observed value).
1150
+ Actually, the disagreements with observations are due to several reasons. The first is physical
+ in nature: we neglected the perturbations due to third bodies or, more generally, the effect of
+ natural satellites such as moons or asteroids. We also did not take into account radiation and
+ solar wind effects. Moreover, the results were obtained as a two-body problem, and hence the
+ motion of more distant planets might be affected by planets closer to the sun, effects which
+ appear not in the dominant term but in higher orders of the expansion. The second factor lies
+ on the computational side, and concerns the numerical method used, the value of the step size,
+ and the high sensitivity of the problem to the initial conditions. One should also mention that
+ for the analytical solution we restricted the study to leading order, neglecting higher orders in
+ the expansion of the exponentials, whereas for the numerical solution the entire exponential is
+ considered.
1162
+ 5 Summary and Conclusion
1164
+ In this work, we followed [23] and used Hamilton's formulation in order to obtain the
+ differential equation of motion and the path equation for the gravitational two-body system.
+ The developments are carried out for the pure Newtonian potential, the Newtonian potential
+ corrected by a Yukawa-type term, and the pure Yukawa potential. As in [23], we have
+ reviewed the stability problem, constructed the linearization matrix and tested the stability
+ of the system with a Yukawa correction, finding that the equilibrium is of center type, which
+ implies stable solutions near the fixed point. We repeated the analysis for a purely Yukawa force
+ and found similar results. We also confirmed that the modified potential obeys Bertrand's
+ theorem.
+ Then, we determined the parameter set corresponding to the planets of the solar system
+ starting from the observed (rmin, vmax, e), estimating α. For both the estimated and nominal
+ values of α, we determined the characteristics of the trajectories numerically and analytically,
+ and compared the methods between themselves and with the observed data. We explained the
+ extent to which these results are consistent with the observational data, presenting in the form
+ of histograms the absolute deviations from observations, which were found to reach an upper
+ deviation of order 80 million km for Neptune using nominal α, and 20 million km for Pluto using
+ estimated α.
1181
+ Acknowledgments:
1182
+ N. Chamoun acknowledges support from the ICTP-Associate pro-
1183
+ gram, from the Humboldt Foundation and from the CAS-PIFI scholarship.
1184
1186
+ Appendices
1187
+ A. Tables of Calculated/Observed Parameters of the Planets
1188
1190
+ Mercury
1191
+ rmin(×106km)
1192
+ rmax(×106km)
1193
+ a(×106km)
1194
+ b(×106km)
1195
+ eccentricity
1196
+ Nnum
1197
+ 46
1198
+ 69.832
1199
+ 57.916
1200
+ 56.67679158
1201
+ 0.205744228
1202
+ Nanal
1203
+ 47
1204
+ 72.043
1205
+ 59.756
1206
+ 58.47840586
1207
+ 0.205646344
1208
+ RN = Nnum
1209
+ Nanal %
1210
+ 97
1211
+ 96.930
1212
+ 96.921
1213
+ 96.91918025
1214
+ 99.95242442
1215
+ (N + Y K)num
1216
+ 46
1217
+ 69.831
1218
+ 57.916
1219
+ 56.67678243
1220
+ 0.205738221
1221
+ (N + Y K)anal
1222
+ 47
1223
+ 72.043
1224
+ 59.756
1225
+ 58.47840528
1226
+ 0.205646344
1227
+ RN+Y K
1228
+ = (N+Y K)num
1229
+ (N+Y K)anal %
1230
+ 97
1231
+ 96.930
1232
+ 96.921
1233
+ 96.91916556
1234
+ 99.95534277
1235
+ Observation
1236
+ 46
1237
+ 69.818
1238
+ 57.909
1239
+ 0.20563069
1240
+ RN−Obs
1241
+ num
1242
+ = Nnum/Obs %
1243
+ 100
1244
+ 99.980
1245
+ 99.988
1246
+ 99.94481595
1247
+ RN−Obs
1248
+ anal
1249
+ = Nanal/Obs %
1250
+ 97
1251
+ 96.911
1252
+ 96.909
1253
+ 99.9923879
1254
+ RY K−Obs
1255
+ num
1256
+ =
1257
+ (N + Y K)num/Obs %
1258
+ 100
1259
+ 99.981
1260
+ 99.988
1261
+ 99.94773407
1262
+ RY K−Obs
1263
+ anal
1264
+ =
1265
+ (N + Y K)anal/Obs %
1266
+ 97
1267
+ 96.911
1268
+ 96.909
1269
+ 99.9923879
1270
+ nominal α = 10−8
1271
+ Table A1: The values of the calculated and observational astronomical parameters of the planet
1272
+ Mercury whose number of moons is 0
1273
+ Mercury
1274
+ rmin(×106km)
1275
+ rmax(×106km)
1276
+ a(×106km)
1277
+ b(×106km)
1278
+ eccentricity
1279
+ Nnum
1280
+ 46
1281
+ 69.623
1282
+ 57.826
1283
+ 56.65144795
1284
+ 0.2022
1285
+ Nanal
1286
+ 46
1287
+ 69.819
1288
+ 57.912
1289
+ 56.67470066
1290
+ 0.2056
1291
+ RN = Nnum
1292
+ Nanal %
1293
+ 100
1294
+ 99.719
1295
+ 99.851
1296
+ 99.95897162
1297
+ 98.3743
1298
+ (N + Y K)num
1299
+ 46
1300
+ 69.613
1301
+ 57.820
1302
+ 56.64729064
1303
+ 0.2022
1304
+ (N + Y K)anal
1305
+ 46
1306
+ 69.815
1307
+ 57.908
1308
+ 56.6714469
1309
+ 0.2056
1310
+ RN+Y K
1311
+ = (N+Y K)num
1312
+ (N+Y K)anal %
1313
+ 100
1314
+ 99.711
1315
+ 99.847
1316
+ 99.9573749
1317
+ 98.3408
1318
+ Observation
1319
+ 46
1320
+ 69.818
1321
+ 57.909
1322
+ 0.2056
1323
+ RN−Obs
1324
+ num
1325
+ = Nnum/Obs %
1326
+ 100
1327
+ 99.721
1328
+ 99.856
1329
+ 98.3817
1330
+ RN−Obs
1331
+ anal
1332
+ = Nanal/Obs %
1333
+ 100
1334
+ 100.001
1335
+ 100.005
1336
+ 100.0075
1337
+ RY K−Obs
1338
+ num
1339
+ =
1340
+ (N + Y K)num/Obs %
1341
+ 100
1342
+ 99.706
1343
+ 99.847
1344
+ 98.3482
1345
+ RY K−Obs
1346
+ anal
1347
+ =
1348
+ (N + Y K)anal/Obs %
1349
+ 100
1350
+ 99.995
1351
+ 99.999
1352
+ 100.0075
1353
+ estimated α = 5.741444131301954 × 10−5
1354
+ Table A2: The values of the calculated and observational astronomical parameters of the planet
1355
+ Mercury whose number of moons is 0
1356
+ 16
1357
+
1358
+ Venus
1359
+ rmin(×106km)
1360
+ rmax(×106km)
1361
+ a(×106km)
1362
+ b(×106km)
1363
+ eccentricity
1364
+ Nnum
1365
+ 107.30
1366
+ 108.689
1367
+ 107.99
1368
+ 107.9982364
1369
+ 0.0044
1370
+ Nanal
1371
+ 107.48
1372
+ 108.961
1373
+ 108.22
1374
+ 108.2222348
1375
+ 0.0072
1376
+ RN = Nnum
1377
+ Nanal %
1378
+ 99.83
1379
+ 99.750
1380
+ 99.79
1381
+ 99.79301998
1382
+ 60.8187
1383
+ (N + Y K)num
1384
+ 107.30
1385
+ 108.689
1386
+ 107.99
1387
+ 107.9982353
1388
+ 0.0044
1389
+ (N + Y K)anal
1390
+ 107.48
1391
+ 108.961
1392
+ 108.22
1393
+ 108.2222337
1394
+ 0.0072
1395
+ RN+Y K
1396
+ = (N+Y K)num
1397
+ (N+Y K)anal %
1398
+ 99.83
1399
+ 99.750
1400
+ 99.79
1401
+ 99.79301998
1402
+ 60.8189
1403
+ Observation
1404
+ 107.48
1405
+ 108.941
1406
+ 108.21
1407
+ 0.0068
1408
+ RN−Obs
1409
+ num
1410
+ = Nnum/Obs %
1411
+ 99.83
1412
+ 99.769
1413
+ 99.80
1414
+ 64.7150
1415
+ RN−Obs
1416
+ anal
1417
+ = Nanal/Obs %
1418
+ 100.00
1419
+ 100.018
1420
+ 100.01
1421
+ 106.4063
1422
+ RY K−Obs
1423
+ num
1424
+ =
1425
+ (N + Y K)num/Obs %
1426
+ 99.83
1427
+ 99.769
1428
+ 99.80
1429
+ 64.7152
1430
+ RY K−Obs
1431
+ anal
1432
+ =
1433
+ (N + Y K)anal/Obs %
1434
+ 100.00
1435
+ 100.018
1436
+ 100.01
1437
+ 106.4063
1438
+ nominal α = 10−8
1439
+ Table A3: The values of the calculated and observational astronomical parameters of the planet
1440
+ Venus whose number of moons is 0
1441
+ Venus
1442
+ rmin(×106km)
1443
+ rmax(×106km)
1444
+ a(×106km)
1445
+ b(×106km)
1446
+ eccentricity
1447
+ Nnum
1448
+ 107.30
1449
+ 108.689
1450
+ 107.99
1451
+ 107.9982364
1452
+ 0.0044
1453
+ Nanal
1454
+ 107.48
1455
+ 108.961
1456
+ 108.22
1457
+ 108.2222348
1458
+ 0.0072
1459
+ RN = Nnum
1460
+ Nanal %
1461
+ 99.83
1462
+ 99.750
1463
+ 99.79
1464
+ 99.79301998
1465
+ 60.8187
1466
+ (N + Y K)num
1467
+ 107.30
1468
+ 108.658
1469
+ 107.98
1470
+ 107.9821956
1471
+ 0.0045
1472
+ (N + Y K)anal
1473
+ 107.47
1474
+ 108.945
1475
+ 108.20
1476
+ 108.2068155
1477
+ 0.0072
1478
+ RN+Y K
1479
+ = (N+Y K)num
1480
+ (N+Y K)anal %
1481
+ 99.84
1482
+ 99.736
1483
+ 99.78
1484
+ 99.79241613
1485
+ 63.0300
1486
+ Observation
1487
+ 107.48
1488
+ 108.941
1489
+ 108.21
1490
+ 0.0068
1491
+ RN−Obs
1492
+ num
1493
+ = Nnum/Obs %
1494
+ 99.83
1495
+ 99.769
1496
+ 99.80
1497
+ 64.7150
1498
+ RN−Obs
1499
+ anal
1500
+ = Nanal/Obs %
1501
+ 100.00
1502
+ 100.018
1503
+ 100.01
1504
+ 106.4063
1505
+ RY K−Obs
1506
+ num
1507
+ =
1508
+ (N + Y K)num/Obs %
1509
+ 99.83
1510
+ 99.740
1511
+ 99.78
1512
+ 67.0680
1513
+ RY K−Obs
1514
+ anal
1515
+ =
1516
+ (N + Y K)anal/Obs %
1517
+ 99.99
1518
+ 100.004
1519
+ 99.99
1520
+ 106.4063
1521
+ estimated α = 1.424988220126711 × 10−4
1522
+ Table A4: The values of the calculated and observational astronomical parameters of the planet
1523
+ Venus whose number of moons is 0
1524
+ 17
1525
+
1526
+ EARTH
1527
+ rmin(×106km)
1528
+ rmax(×106km)
1529
+ a(×106km)
1530
+ b(×106km)
1531
+ eccentricity
1532
+ Nnum
1533
+ 146.884
1534
+ 151.7
1535
+ 149.336
1536
+ 149.319847
1537
+ 0.0156
1538
+ Nanal
1539
+ 147.126
1540
+ 152.1
1541
+ 149.625
1542
+ 149.6034965
1543
+ 0.0168
1544
+ RN = Nnum
1545
+ Nanal %
1546
+ 99.835
1547
+ 99.7
1548
+ 99.806
1549
+ 99.81039915
1550
+ 92.5721
1551
+ (N + Y K)num
1552
+ 146.884
1553
+ 151.7
1554
+ 149.336
1555
+ 149.3198455
1556
+ 0.0156
1557
+ (N + Y K)anal
1558
+ 147.126
1559
+ 152.1
1560
+ 149.625
1561
+ 149.603495
1562
+ 0.0168
1563
+ RN+Y K
1564
+ = (N+Y K)num
1565
+ (N+Y K)anal %
1566
+ 99.835
1567
+ 99.7
1568
+ 99.806
1569
+ 99.81039915
1570
+ 92.5720
1571
+ Observation
1572
+ 147.095
1573
+ 152.1
1574
+ 149.598
1575
+ 0.0167
1576
+ RN−Obs
1577
+ num
1578
+ = Nnum/Obs %
1579
+ 99.8572
1580
+ 99.7
1581
+ 99.825
1582
+ 93.5903
1583
+ RN−Obs
1584
+ anal
1585
+ = Nanal/Obs %
1586
+ 99.978
1587
+ 99.9
1588
+ 99.981
1589
+ 98.9120
1590
+ RY K−Obs
1591
+ num
1592
+ =
1593
+ (N + Y K)num/Obs %
1594
+ 99.857
1595
+ 99.7
1596
+ 99.825
1597
+ 93.5902
1598
+ RY K−Obs
1599
+ anal
1600
+ =
1601
+ (N + Y K)anal/Obs %
1602
+ 99.978
1603
+ 99.9
1604
+ 99.981
1605
+ 98.9120
1606
+ nominal α = 10−8
1607
+ Table A5: The values of the calculated and observational astronomical parameters of the planet
1608
+ Earth whose number of moons is 0
1609
+ EARTH
1610
+ rmin(×106km)
1611
+ rmax(×106km)
1612
+ a(×106km)
1613
+ b(×106km)
1614
+ eccentricity
1615
+ Nnum
1616
+ 146.884
1617
+ 151.7
1618
+ 149.336
1619
+ 149.319847
1620
+ 0.0156
1621
+ Nanal
1622
+ 147.126
1623
+ 152.1
1624
+ 149.625
1625
+ 149.6034965
1626
+ 0.0168
1627
+ RN = Nnum
1628
+ Nanal %
1629
+ 99.835
1630
+ 99.7
1631
+ 99.806
1632
+ 99.81039915
1633
+ 92.5721
1634
+ (N + Y K)num
1635
+ 146.883
1636
+ 151.7
1637
+ 149.307
1638
+ 149.2910008
1639
+ 0.0154
1640
+ (N + Y K)anal
1641
+ 147.099
1642
+ 152.0
1643
+ 149.597
1644
+ 149.5762082
1645
+ 0.01688
1646
+ RN+Y K
1647
+ = (N+Y K)num
1648
+ (N+Y K)anal %
1649
+ 99.853
1650
+ 99.7
1651
+ 99.805
1652
+ 99.80932302
1653
+ 91.4700
1654
+ Observation
1655
+ 147.095
1656
+ 152.1
1657
+ 149.598
1658
+ 0.0167
1659
+ RN−Obs
1660
+ num
1661
+ = Nnum/Obs %
1662
+ 99.8572
1663
+ 99.7
1664
+ 99.825
1665
+ 93.5903
1666
+ RN−Obs
1667
+ anal
1668
+ = Nanal/Obs %
1669
+ 99.978
1670
+ 99.9
1671
+ 99.981
1672
+ 98.9120
1673
+ RY K−Obs
1674
+ num
1675
+ =
1676
+ (N + Y K)num/Obs %
1677
+ 99.856
1678
+ 99.7
1679
+ 99.805
1680
+ 92.4761
1681
+ RY K−Obs
1682
+ anal
1683
+ =
1684
+ (N + Y K)anal/Obs %
1685
+ 100.003
1686
+ 99.9
1687
+ 99.999
1688
+ 101.0999
1689
+ estimated α = 1.824376359731428 × 10−4
1690
+ Table A6: The values of the calculated and observational astronomical parameters of the planet
1691
+ Earth whose number of moons is 0
1692
+ 18
1693
+
1694
+ MARS
1695
+ rmin(×106km)
1696
+ rmax(×106km)
1697
+ a(×106km)
1698
+ b(×106km)
1699
+ eccentricity
1700
+ Nnum
1701
+ 206.57
1702
+ 248.480
1703
+ 227.52
1704
+ 226.6509159
1705
+ 0.0898
1706
+ Nanal
1707
+ 206.64
1708
+ 249.277
1709
+ 227.96
1710
+ 226.9631182
1711
+ 0.0935
1712
+ RN = Nnum
1713
+ Nanal %
1714
+ 99.96
1715
+ 99.680
1716
+ 99.80
1717
+ 99.8624436
1718
+ 96.0965
1719
+ (N + Y K)num
1720
+ 206.57
1721
+ 248.480
1722
+ 227.52
1723
+ 226.6509134
1724
+ 0.0898
1725
+ (N + Y K)anal
1726
+ 206.64
1727
+ 249.277
1728
+ 227.96
1729
+ 226.9631159
1730
+ 0.0935
1731
+ RN+Y K
1732
+ = (N+Y K)num
1733
+ (N+Y K)anal %
1734
+ 99.96
1735
+ 99.680
1736
+ 99.80
1737
+ 99.86244351
1738
+ 96.0965
1739
+ Observation
1740
+ 206.65
1741
+ 249.261
1742
+ 227.94
1743
+ 0.0935
1744
+ RN−Obs
1745
+ num
1746
+ = Nnum/Obs %
1747
+ 99.96
1748
+ 99.687
1749
+ 99.81
1750
+ 96.1252
1751
+ RN−Obs
1752
+ anal
1753
+ = Nanal/Obs %
1754
+ 99.99
1755
+ 100.006
1756
+ 100.01
1757
+ 100.0298
1758
+ RY K−Obs
1759
+ num
1760
+ =
1761
+ (N + Y K)num/Obs %
1762
+ 99.96
1763
+ 99.687
1764
+ 99.81
1765
+ 96.1252
1766
+ RY K−Obs
1767
+ anal
1768
+ =
1769
+ (N + Y K)anal/Obs %
1770
+ 99.99
1771
+ 100.006
1772
+ 100.01
1773
+ 100.0298
1774
+ nominal α = 10−8
1775
+ Table A7: The values of the calculated and observational astronomical parameters of the planet
1776
+ Mars whose number of moons is 0
1777
+ MARS
1778
+ rmin(×106km)
1779
+ rmax(×106km)
1780
+ a(×106km)
1781
+ b(×106km)
1782
+ eccentricity
1783
+ Nnum
1784
+ 206.57
1785
+ 248.480
1786
+ 227.52
1787
+ 226.6509159
1788
+ 0.0898
1789
+ Nanal
1790
+ 206.64
1791
+ 249.277
1792
+ 227.96
1793
+ 226.9631182
1794
+ 0.0935
1795
+ RN = Nnum
1796
+ Nanal %
1797
+ 99.96
1798
+ 99.680
1799
+ 99.80
1800
+ 99.8624436
1801
+ 96.0965
1802
+ (N + Y K)num
1803
+ 206.57
1804
+ 248.425
1805
+ 227.49
1806
+ 226.6249874
1807
+ 0.0897
1808
+ (N + Y K)anal
1809
+ 206.62
1810
+ 249.252
1811
+ 227.93
1812
+ 226.9402451
1813
+ 0.0935
1814
+ RN+Y K
1815
+ = (N+Y K)num
1816
+ (N+Y K)anal %
1817
+ 99.97
1818
+ 99.668
1819
+ 99.80
1820
+ 99.86108339
1821
+ 95.9861
1822
+ Observation
1823
+ 206.65
1824
+ 249.261
1825
+ 227.94
1826
+ 0.0935
1827
+ RN−Obs
1828
+ num
1829
+ = Nnum/Obs %
1830
+ 99.96
1831
+ 99.687
1832
+ 99.81
1833
+ 96.1252
1834
+ RN−Obs
1835
+ anal
1836
+ = Nanal/Obs %
1837
+ 99.99
1838
+ 100.006
1839
+ 100.01
1840
+ 100.0298
1841
+ RY K−Obs
1842
+ num
1843
+ =
1844
+ (N + Y K)num/Obs %
1845
+ 99.96
1846
+ 99.664
1847
+ 99.80
1848
+ 96.0147
1849
+ RY K−Obs
1850
+ anal
1851
+ =
1852
+ (N + Y K)anal/Obs %
1853
+ 99.98
1854
+ 99.996
1855
+ 99.99
1856
+ 100.0298
1857
+ estimated α = 1.007889331583467 × 10−4
1858
+ Table A8: The values of the calculated and observational astronomical parameters of the planet
1859
+ Mars whose number of moons is 0
1860
+ 19
1861
+
1862
+ JUPITER
1863
+ rmin(×106km)
1864
+ rmax(×106km)
1865
+ a(×106km)
1866
+ b(×106km)
1867
+ eccentricity
1868
+ Nnum
1869
+ 739.902
1870
+ 815.533
1871
+ 777.717
1872
+ 776.9190412
1873
+ 0.0469
1874
+ Nanal
1875
+ 742.542
1876
+ 818.568
1877
+ 780.555
1878
+ 779.626266
1879
+ 0.04873
1880
+ RN = Nnum
1881
+ Nanal %
1882
+ 99.644
1883
+ 99.629
1884
+ 99.636
1885
+ 99.65275352
1886
+ 96.3711
1887
+ (N + Y K)num
1888
+ 739.902
1889
+ 815.533
1890
+ 777.717
1891
+ 776.9190329
1892
+ 0.0469
1893
+ (N + Y K)anal
1894
+ 742.542
1895
+ 818.568
1896
+ 780.555
1897
+ 779.6262582
1898
+ 0.0487
1899
+ RN+Y K
1900
+ = (N+Y K)num
1901
+ (N+Y K)anal %
1902
+ 99.644
1903
+ 99.629
1904
+ 99.636
1905
+ 99.65275345
1906
+ 96.3711
1907
+ Observation
1908
+ 740.595
1909
+ 816.363
1910
+ 778.479
1911
+ 0.0487
1912
+ RN−Obs
1913
+ num
1914
+ = Nnum/Obs %
1915
+ 99.906
1916
+ 99.898
1917
+ 99.902
1918
+ 96.4399
1919
+ RN−Obs
1920
+ anal
1921
+ = Nanal/Obs %
1922
+ 100.262
1923
+ 100.270
1924
+ 100.266
1925
+ 100.0714
1926
+ RY K−Obs
1927
+ num
1928
+ =
1929
+ (N + Y K)num/Obs %
1930
+ 99.906
1931
+ 99.898
1932
+ 99.902
1933
+ 96.4399
1934
+ RY K−Obs
1935
+ anal
1936
+ =
1937
+ (N + Y K)anal/Obs %
1938
+ 100.262
1939
+ 100.270
1940
+ 100.266
1941
+ 100.0714
1942
+ nominal α = 10−8
1943
+ Table A9: The values of the calculated and observational astronomical parameters of the planet
1944
+ Jupiter whose number of moons is 0
1945
+ JUPITER
1946
+ rmin(×106km)
1947
+ rmax(×106km)
1948
+ a(×106km)
1949
+ b(×106km)
1950
+ eccentricity
1951
+ Nnum
1952
+ 739.902
1953
+ 815.533
1954
+ 777.717
1955
+ 776.9190412
1956
+ 0.0469
1957
+ Nanal
1958
+ 742.542
1959
+ 818.568
1960
+ 780.555
1961
+ 779.626266
1962
+ 0.04873
1963
+ RN = Nnum
1964
+ Nanal %
1965
+ 99.644
1966
+ 99.629
1967
+ 99.636
1968
+ 99.65275352
1969
+ 96.3711
1970
+ (N + Y K)num
1971
+ 739.837
1972
+ 810.932
1973
+ 775.385
1974
+ 774.6852056
1975
+ 0.0441
1976
+ (N + Y K)anal
1977
+ 740.567
1978
+ 816.390
1979
+ 778.478
1980
+ 777.5526264
1981
+ 0.0487
1982
+ RN+Y K
1983
+ = (N+Y K)num
1984
+ (N+Y K)anal %
1985
+ 99.901
1986
+ 99.331
1987
+ 99.602
1988
+ 99.63122486
1989
+ 90.6263
1990
+ Observation
1991
+ 740.595
1992
+ 816.363
1993
+ 778.479
1994
+ 0.0487
1995
+ RN−Obs
1996
+ num
1997
+ = Nnum/Obs %
1998
+ 99.906
1999
+ 99.898
2000
+ 99.902
2001
+ 96.4399
2002
+ RN−Obs
2003
+ anal
2004
+ = Nanal/Obs %
2005
+ 100.262
2006
+ 100.270
2007
+ 100.266
2008
+ 100.0714
2009
+ RY K−Obs
2010
+ num
2011
+ =
2012
+ (N + Y K)num/Obs %
2013
+ 99.897
2014
+ 99.334
2015
+ 99.602
2016
+ 90.6911
2017
+ RY K−Obs
2018
+ anal
2019
+ =
2020
+ (N + Y K)anal/Obs %
2021
+ 99.996
2022
+ 100.003
2023
+ 99.999
2024
+ 100.0714
2025
+ estimated α = 2.666880127522 × 10−3
2026
+ Table A10: The values of the calculated and observational astronomical parameters of the planet
2027
+ Jupiter whose number of moons is 0
2028
+ 20
2029
+
2030
+ SATURN
2031
+ rmin(×106km)
2032
+ rmax(×106km)
2033
+ a(×106km)
2034
+ b(×106km)
2035
+ eccentricity
2036
+ Nnum
2037
+ 1355.461
2038
+ 1523.344
2039
+ 1439.403
2040
+ 1437.455093
2041
+ 0.055
2042
+ Nanal
2043
+ 1368.378
2044
+ 1518.496
2045
+ 1443.437
2046
+ 1441.481829
2047
+ 0.052
2048
+ RN = Nnum
2049
+ Nanal %
2050
+ 99.056
2051
+ 100.319
2052
+ 99.720
2053
+ 99.72065302
2054
+ 106.042
2055
+ (N + Y K)num
2056
+ 1355.461
2057
+ 1523.344
2058
+ 1439.403
2059
+ 1437.455078
2060
+ 0.055
2061
+ (N + Y K)anal
2062
+ 1368.378
2063
+ 1518.496
2064
+ 1443.437
2065
+ 1441.481815
2066
+ 0.052
2067
+ RN+Y K
2068
+ = (N+Y K)num
2069
+ (N+Y K)anal %
2070
+ 99.056
2071
+ 100.319
2072
+ 99.720
2073
+ 99.72065294
2074
+ 106.042
2075
+ Observation
2076
+ 1357.554
2077
+ 1506.527
2078
+ 1432.041
2079
+ 0.052
2080
+ RN−Obs
2081
+ num
2082
+ = Nnum/Obs %
2083
+ 99.845
2084
+ 101.116
2085
+ 100.514
2086
+ 106.081
2087
+ RN−Obs
2088
+ anal
2089
+ = Nanal/Obs %
2090
+ 100.797
2091
+ 100.794
2092
+ 100.795
2093
+ 100.036
2094
+ RY K−Obs
2095
+ num
2096
+ =
2097
+ (N + Y K)num/Obs %
2098
+ 99.845
2099
+ 101.116
2100
+ 100.514
2101
+ 106.081
2102
+ RY K−Obs
2103
+ anal
2104
+ =
2105
+ (N + Y K)anal/Obs %
2106
+ 100.797
2107
+ 100.794
2108
+ 100.795
2109
+ 100.036
2110
+ nominal α = 10−8
2111
+ Table A11: The values of the calculated and observational astronomical parameters of the planet
2112
+ Saturn whose number of moons is 0
2113
+ SATURN
2114
+ rmin(×106km)
2115
+ rmax(×106km)
2116
+ a(×106km)
2117
+ b(×106km)
2118
+ eccentricity
2119
+ Nnum
2120
+ 1355.461
2121
+ 1523.344
2122
+ 1439.403
2123
+ 1437.455093
2124
+ 0.055
2125
+ Nanal
2126
+ 1368.378
2127
+ 1518.496
2128
+ 1443.437
2129
+ 1441.481829
2130
+ 0.052
2131
+ RN = Nnum
2132
+ Nanal %
2133
+ 99.056
2134
+ 100.319
2135
+ 99.720
2136
+ 99.72065302
2137
+ 106.042
2138
+ (N + Y K)num
2139
+ 1354.869
2140
+ 1497.652
2141
+ 1426.261
2142
+ 1424.954776
2143
+ 0.046
2144
+ (N + Y K)anal
2145
+ 1357.574
2146
+ 1506.507
2147
+ 1432.040
2148
+ 1430.100672
2149
+ 0.052
2150
+ RN+Y K
2151
+ = (N+Y K)num
2152
+ (N+Y K)anal %
2153
+ 99.800
2154
+ 99.412
2155
+ 99.596
2156
+ 99.64017246
2157
+ 89.244
2158
+ Observation
2159
+ 1357.554
2160
+ 1506.527
2161
+ 1432.041
2162
+ 0.052
2163
+ RN−Obs
2164
+ num
2165
+ = Nnum/Obs %
2166
+ 99.845
2167
+ 101.116
2168
+ 100.514
2169
+ 106.081
2170
+ RN−Obs
2171
+ anal
2172
+ = Nanal/Obs %
2173
+ 100.797
2174
+ 100.794
2175
+ 100.795
2176
+ 100.036
2177
+ RY K−Obs
2178
+ num
2179
+ =
2180
+ (N + Y K)num/Obs %
2181
+ 99.802
2182
+ 99.410
2183
+ 99.596
2184
+ 89.277
2185
+ RY K−Obs
2186
+ anal
2187
+ =
2188
+ (N + Y K)anal/Obs %
2189
+ 100.001
2190
+ 99.998
2191
+ 99.999
2192
+ 100.036
2193
+ estimated α = 7.958291053541 × 10−3
2194
+ Table A12: The values of the calculated and observational astronomical parameters of the planet
2195
+ Saturn whose number of moons is 0
2196
+ 21
2197
+
2198
+ URANUS
2199
+ rmin(×106km)
2200
+ rmax(×106km)
2201
+ a(×106km)
2202
+ b(×106km)
2203
+ eccentricity
2204
+ Nnum
2205
+ 2729.595
2206
+ 2957.44
2207
+ 2843.519
2208
+ 2841.649275
2209
+ 0.0381
2210
+ Nanal
2211
+ 2717.213
2212
+ 2984.63
2213
+ 2850.921
2214
+ 2847.766462
2215
+ 0.0469
2216
+ RN = Nnum
2217
+ Nanal %
2218
+ 100.455
2219
+ 99.08
2220
+ 99.740
2221
+ 99.78519352
2222
+ 81.2504
2223
+ (N + Y K)num
2224
+ 2729.595
2225
+ 2957.44
2226
+ 2843.519
2227
+ 2841.649245
2228
+ 0.0381
2229
+ (N + Y K)anal
2230
+ 2717.213
2231
+ 2984.63
2232
+ 2850.921
2233
+ 2847.766434
2234
+ 0.0469
2235
+ RN+Y K
2236
+ = (N+Y K)num
2237
+ (N+Y K)anal %
2238
+ 100.455
2239
+ 99.08
2240
+ 99.740
2241
+ 99.78519344
2242
+ 81.2504
2243
+ Observation
2244
+ 2732.696
2245
+ 3001.39
2246
+ 2867.043
2247
+ 0.0469
2248
+ RN−Obs
2249
+ num
2250
+ = Nnum/Obs %
2251
+ 99.886
2252
+ 98.53
2253
+ 99.179
2254
+ 81.3684
2255
+ RN−Obs
2256
+ anal
2257
+ = Nanal/Obs %
2258
+ 99.433
2259
+ 99.44
2260
+ 99.437
2261
+ 100.1452
2262
+ RY K−Obs
2263
+ num
2264
+ =
2265
+ (N + Y K)num/Obs %
2266
+ 99.886
2267
+ 98.53
2268
+ 99.179
2269
+ 81.3684
2270
+ RY K−Obs
2271
+ anal
2272
+ =
2273
+ (N + Y K)anal/Obs %
2274
+ 99.433
2275
+ 99.44
2276
+ 99.437
2277
+ 100.1452
2278
+ nominal α = 10−8
2279
+ Table A13: The values of the calculated and observational astronomical parameters of the planet
2280
+ Uranus whose number of moons is 0
2281
+ URANUS
2282
+ rmin(×106km)
2283
+ rmax(×106km)
2284
+ a(×106km)
2285
+ b(×106km)
2286
+ eccentricity
2287
+ Nnum
2288
+ 2729.595
2289
+ 2957.44
2290
+ 2843.519
2291
+ 2841.649275
2292
+ 0.0381
2293
+ Nanal
2294
+ 2717.213
2295
+ 2984.63
2296
+ 2850.921
2297
+ 2847.766462
2298
+ 0.0469
2299
+ RN = Nnum
2300
+ Nanal %
2301
+ 100.455
2302
+ 99.08
2303
+ 99.740
2304
+ 99.78519352
2305
+ 81.2504
2306
+ (N + Y K)num
2307
+ 2730.116
2308
+ 2992.91
2309
+ 2861.516
2310
+ 2858.935401
2311
+ 0.0441
2312
+ (N + Y K)anal
2313
+ 2732.578
2314
+ 3001.50
2315
+ 2867.042
2316
+ 2863.869614
2317
+ 0.0469
2318
+ RN+Y K
2319
+ = (N+Y K)num
2320
+ (N+Y K)anal %
2321
+ 99.909
2322
+ 99.71
2323
+ 99.807
2324
+ 99.82770818
2325
+ 94.0932
2326
+ Observation
2327
+ 2732.696
2328
+ 3001.39
2329
+ 2867.043
2330
+ 0.0469
2331
+ RN−Obs
2332
+ num
2333
+ = Nnum/Obs %
2334
+ 99.886
2335
+ 98.53
2336
+ 99.179
2337
+ 81.3684
2338
+ RN−Obs
2339
+ anal
2340
+ = Nanal/Obs %
2341
+ 99.433
2342
+ 99.44
2343
+ 99.437
2344
+ 100.1452
2345
+ RY K−Obs
2346
+ num
2347
+ =
2348
+ (N + Y K)num/Obs %
2349
+ 99.905
2350
+ 99.71
2351
+ 99.807
2352
+ 94.2299
2353
+ RY K−Obs
2354
+ anal
2355
+ =
2356
+ (N + Y K)anal/Obs %
2357
+ 99.995
2358
+ 100.00
2359
+ 99.999
2360
+ 100.1452
2361
+ estimated α = −5.622864957252 × 10−3
2362
+ Table A14: The values of the calculated and observational astronomical parameters of the planet
2363
+ Uranus whose number of moons is 0
2364
+ 22
2365
+
2366
+ Neptune
2367
+ rmin(×106km)
2368
+ rmax(×106km)
2369
+ a(×106km)
2370
+ b(×106km)
2371
+ eccentricity
2372
+ Nnum
2373
+ 4464.81
2374
+ 4634.099
2375
+ 4549.454
2376
+ 4548.810665
2377
+ 0.0177
2378
+ Nanal
2379
+ 4512.97
2380
+ 4601.381
2381
+ 4557.176
2382
+ 4556.953752
2383
+ 0.0097
2384
+ RN = Nnum
2385
+ Nanal %
2386
+ 98.93
2387
+ 100.711
2388
+ 99.830
2389
+ 99.82130416
2390
+ 180.8744
2391
+ (N + Y K)num
2392
+ 4464.81
2393
+ 4634.098
2394
+ 4549.454
2395
+ 4548.810617
2396
+ 0.0177
2397
+ (N + Y K)anal
2398
+ 4512.97
2399
+ 4601.381
2400
+ 4557.176
2401
+ 4556.953706
2402
+ 0.0097
2403
+ RN+Y K
2404
+ = (N+Y K)num
2405
+ (N+Y K)anal %
2406
+ 98.93
2407
+ 100.711
2408
+ 99.830
2409
+ 99.82130411
2410
+ 180.8743
2411
+ Observation
2412
+ 4471.05
2413
+ 4558.857
2414
+ 4514.953
2415
+ 0.0097
2416
+ RN−Obs
2417
+ num
2418
+ = Nnum/Obs %
2419
+ 99.86
2420
+ 101.650
2421
+ 100.764
2422
+ 182.6474
2423
+ RN−Obs
2424
+ anal
2425
+ = Nanal/Obs %
2426
+ 100.93
2427
+ 100.932
2428
+ 100.935
2429
+ 100.9802
2430
+ RY K−Obs
2431
+ num
2432
+ =
2433
+ (N + Y K)num/Obs %
2434
+ 99.86
2435
+ 101.650
2436
+ 100.764
2437
+ 182.6473
2438
+ RY K−Obs
2439
+ anal
2440
+ =
2441
+ (N + Y K)anal/Obs %
2442
+ 100.93
2443
+ 100.932
2444
+ 100.935
2445
+ 100.9802
2446
+ nominal α = 10−8
2447
+ Table A15: The values of the calculated and observational astronomical parameters of the planet
2448
+ Neptune whose number of moons is 0
2449
+ Neptune
2450
+ rmin(×106km)
2451
+ rmax(×106km)
2452
+ a(×106km)
2453
+ b(×106km)
2454
+ eccentricity
2455
+ Nnum
2456
+ 4464.81
2457
+ 4634.099
2458
+ 4549.454
2459
+ 4548.810665
2460
+ 0.0177
2461
+ Nanal
2462
+ 4512.97
2463
+ 4601.381
2464
+ 4557.176
2465
+ 4556.953752
2466
+ 0.0097
2467
+ RN = Nnum
2468
+ Nanal %
2469
+ 98.93
2470
+ 100.711
2471
+ 99.830
2472
+ 99.82130416
2473
+ 180.8744
2474
+ (N + Y K)num
2475
+ 4463.01
2476
+ 4546.479
2477
+ 4504.745
2478
+ 4504.517794
2479
+ 0.0096
2480
+ (N + Y K)anal
2481
+ 4471.15
2482
+ 4558.747
2483
+ 4514.952
2484
+ 4514.73215
2485
+ 0.0097
2486
+ RN+Y K
2487
+ = (N+Y K)num
2488
+ (N+Y K)anal %
2489
+ 99.81
2490
+ 99.730
2491
+ 99.773
2492
+ 99.77375499
2493
+ 98.6587
2494
+ Observation
2495
+ 4471.05
2496
+ 4558.857
2497
+ 4514.953
2498
+ 0.0097
2499
+ RN−Obs
2500
+ num
2501
+ = Nnum/Obs %
2502
+ 99.86
2503
+ 101.650
2504
+ 100.764
2505
+ 182.6474
2506
+ RN−Obs
2507
+ anal
2508
+ = Nanal/Obs %
2509
+ 100.93
2510
+ 100.932
2511
+ 100.935
2512
+ 100.9802
2513
+ RY K−Obs
2514
+ num
2515
+ =
2516
+ (N + Y K)num/Obs %
2517
+ 99.82
2518
+ 99.728
2519
+ 99.773
2520
+ 99.6259
2521
+ RY K−Obs
2522
+ anal
2523
+ =
2524
+ (N + Y K)anal/Obs %
2525
+ 100.00
2526
+ 99.997
2527
+ 99.999
2528
+ 100.9802
2529
+ estimated α = 9.351961741362 × 10−3
2530
+ Table A16: The values of the calculated and observational astronomical parameters of the planet
2531
+ Neptune whose number of moons is 0
2532
+ 23
2533
+
2534
+ Pluto
2535
+ rmin(×106km)
2536
+ rmax(×106km)
2537
+ a(×106km)
2538
+ b(×106km)
2539
+ eccentricity
2540
+ Nnum
2541
+ 4439.709
2542
+ 7265.423
2543
+ 5852.566
2544
+ 5684.326067
2545
+ 0.2397
2546
+ Nanal
2547
+ 4431.722
2548
+ 7298.614
2549
+ 5865.168
2550
+ 5687.267307
2551
+ 0.2444
2552
+ RN = Nnum
2553
+ Nanal %
2554
+ 100.180
2555
+ 99.545
2556
+ 99.785
2557
+ 99.94828377
2558
+ 98.0832
2559
+ (N + Y K)num
2560
+ 4439.709
2561
+ 7265.423
2562
+ 5852.566
2563
+ 5684.325992
2564
+ 0.2397
2565
+ (N + Y K)anal
2566
+ 4431.722
2567
+ 7298.614
2568
+ 5865.168
2569
+ 5687.26725
2570
+ 0.2444
2571
+ RN+Y K
2572
+ = (N+Y K)num
2573
+ (N+Y K)anal %
2574
+ 100.180
2575
+ 99.545
2576
+ 99.785
2577
+ 99.94828346
2578
+ 98.0832
2579
+ Observation
2580
+ 4434.987
2581
+ 7304.326
2582
+ 5869.656
2583
+ 0.2444
2584
+ RN−Obs
2585
+ num
2586
+ = Nnum/Obs %
2587
+ 100.106
2588
+ 99.467
2589
+ 99.708
2590
+ 98.0882
2591
+ RN−Obs
2592
+ anal
2593
+ = Nanal/Obs %
2594
+ 99.926
2595
+ 99.921
2596
+ 99.923
2597
+ 100.0051
2598
+ RY K−Obs
2599
+ num
2600
+ =
2601
+ (N + Y K)num/Obs %
2602
+ 100.106
2603
+ 99.467
2604
+ 99.708
2605
+ 98.0882
2606
+ RY K−Obs
2607
+ anal
2608
+ =
2609
+ (N + Y K)anal/Obs %
2610
+ 99.926
2611
+ 99.921
2612
+ 99.923
2613
+ 100.0051
2614
+ nominal α = 10−8
2615
+ Table A17: The values of the calculated and observational astronomical parameters of the planet
2616
+ Pluto whose number of moons is 0
2617
+ Pluto
2618
+ rmin(×106km)
2619
+ rmax(×106km)
2620
+ a(×106km)
2621
+ b(×106km)
2622
+ eccentricity
2623
+ Nnum
2624
+ 4439.709
2625
+ 7265.423
2626
+ 5852.566
2627
+ 5684.326067
2628
+ 0.2397
2629
+ Nanal
2630
+ 4431.722
2631
+ 7298.614
2632
+ 5865.168
2633
+ 5687.267307
2634
+ 0.2444
2635
+ RN = Nnum
2636
+ Nanal %
2637
+ 100.180
2638
+ 99.545
2639
+ 99.785
2640
+ 99.94828377
2641
+ 98.0832
2642
+ (N + Y K)num
2643
+ 4439.740
2644
+ 7280.242
2645
+ 5859.991
2646
+ 5690.112819
2647
+ 0.2407
2648
+ (N + Y K)anal
2649
+ 4435.112
2650
+ 7304.196
2651
+ 5869.654
2652
+ 5691.616958
2653
+ 0.2444
2654
+ RN+Y K
2655
+ = (N+Y K)num
2656
+ (N+Y K)anal %
2657
+ 100.104
2658
+ 99.672
2659
+ 99.835
2660
+ 99.97357273
2661
+ 98.4812
2662
+ Observation
2663
+ 4434.987
2664
+ 7304.326
2665
+ 5869.656
2666
+ 0.2444
2667
+ RN−Obs
2668
+ num
2669
+ = Nnum/Obs %
2670
+ 100.106
2671
+ 99.467
2672
+ 99.708
2673
+ 98.0882
2674
+ RN−Obs
2675
+ anal
2676
+ = Nanal/Obs %
2677
+ 99.926
2678
+ 99.921
2679
+ 99.923
2680
+ 100.0051
2681
+ RY K−Obs
2682
+ num
2683
+ =
2684
+ (N + Y K)num/Obs %
2685
+ 100.107
2686
+ 99.670
2687
+ 99.835
2688
+ 98.4862
2689
+ RY K−Obs
2690
+ anal
2691
+ =
2692
+ (N + Y K)anal/Obs %
2693
+ 100.002
2694
+ 99.998
2695
+ 99.999
2696
+ 100.0051
2697
+ estimated α = −7.642205983339201 × 10−4
2698
+ Table A18: The values of the calculated and observational astronomical parameters of the planet
2699
+ Pluto whose number of moons is 0
2700
+ 24
2701
+
2702
+ B. Tables of Absolute Deviations from Observation of the Planets
2703
+ 25
2704
+
2705
+ RY K−Obs
2706
+ num
2707
+ RY K−Obs
2708
+ anal
2709
+ Observed rmax
2710
+ rnum
2711
+ max − Obs
2712
+ ranal
2713
+ max − Obs
2714
+ MERCURY
2715
+ 99.721
2716
+ 100.001
2717
+ 69.818
2718
+ -0.194
2719
+ 0.001
2720
+ Venus
2721
+ 99.769
2722
+ 100.018
2723
+ 108.941
2724
+ -0.251
2725
+ 0.020
2726
+ EARTH
2727
+ 99.794
2728
+ 99.984
2729
+ 152.100
2730
+ -0.312
2731
+ -0.024
2732
+ MARS
2733
+ 99.687
2734
+ 100.006
2735
+ 249.261
2736
+ -0.780
2737
+ 0.016
2738
+ JUPITER
2739
+ 99.898
2740
+ 100.270
2741
+ 816.363
2742
+ -0.829
2743
+ 2.205
2744
+ SATURN
2745
+ 101.116
2746
+ 100.794
2747
+ 1506.527
2748
+ 16.817
2749
+ 11.969
2750
+ URANUS
2751
+ 98.535
2752
+ 99.441
2753
+ 3001.390
2754
+ -43.947
2755
+ -16.759
2756
+ Neptune
2757
+ 101.650
2758
+ 100.932
2759
+ 4558.857
2760
+ 75.241
2761
+ 42.524
2762
+ Pluto
2763
+ 99.467
2764
+ 99.921
2765
+ 7304.326
2766
+ -38.902
2767
+ -5.711
2768
+ Table B1: Absolute deviations, with nominal α, of rmax from observation, evaluated in (106
2769
+ km).
2770
+ RY K−Obs
2771
+ num
2772
+ RY K−Obs
2773
+ anal
2774
+ Observed rmin
2775
+ rnum
2776
+ min − Obs
2777
+ ranal
2778
+ min − Obs
2779
+ MERCURY
2780
+ 100.062
2781
+ 100.012
2782
+ 46.000
2783
+ 0.028
2784
+ 0.005
2785
+ Venus
2786
+ 99.839
2787
+ 100.008
2788
+ 107.480
2789
+ -0.172
2790
+ 0.009
2791
+ EARTH
2792
+ 99.857
2793
+ 99.978
2794
+ 147.095
2795
+ -0.210
2796
+ -0.031
2797
+ MARS
2798
+ 99.962
2799
+ 99.999
2800
+ 206.650
2801
+ -0.077
2802
+ -0.001
2803
+ JUPITER
2804
+ 99.906
2805
+ 100.262
2806
+ 740.595
2807
+ -0.692
2808
+ 1.947
2809
+ SATURN
2810
+ 99.845
2811
+ 100.797
2812
+ 1357.554
2813
+ -2.092
2814
+ 10.824
2815
+ URANUS
2816
+ 99.886
2817
+ 99.433
2818
+ 2732.696
2819
+ -3.100
2820
+ -15.482
2821
+ Neptune
2822
+ 99.860
2823
+ 100.937
2824
+ 4471.050
2825
+ -6.239
2826
+ 41.922
2827
+ Pluto
2828
+ 100.106
2829
+ 99.926
2830
+ 4434.987
2831
+ 4.722
2832
+ -3.264
2833
+ Table B2: Absolute deviations, with nominal α, of rmin from observation, evaluated in (106 km).
2834
+ 26
2835
+
2836
+ RY K−Obs
2837
+ num
2838
+ RY K−Obs
2839
+ anal
2840
+ Observed rmax
2841
+ rnum
2842
+ max − Obs
2843
+ ranal
2844
+ max − Obs
2845
+ MERCURY
2846
+ 99.706
2847
+ 99.995
2848
+ 69.818
2849
+ -0.204
2850
+ -0.002
2851
+ Venus
2852
+ 99.740
2853
+ 100.004
2854
+ 108.941
2855
+ -0.282
2856
+ 0.004
2857
+ EARTH
2858
+ 99.757
2859
+ 99.997
2860
+ 152.100
2861
+ -0.369
2862
+ -0.003
2863
+ MARS
2864
+ 99.664
2865
+ 99.996
2866
+ 249.261
2867
+ -0.835
2868
+ -0.008
2869
+ JUPITER
2870
+ 99.334
2871
+ 100.003
2872
+ 816.363
2873
+ -5.430
2874
+ 0.027
2875
+ SATURN
2876
+ 99.410
2877
+ 99.998
2878
+ 1506.527
2879
+ -8.874
2880
+ -0.019
2881
+ URANUS
2882
+ 99.717
2883
+ 100.003
2884
+ 3001.390
2885
+ -8.472
2886
+ 0.117
2887
+ Neptune
2888
+ 99.728
2889
+ 99.997
2890
+ 4558.857
2891
+ -12.377
2892
+ -0.109
2893
+ Pluto
2894
+ 99.670
2895
+ 99.998
2896
+ 7304.326
2897
+ -24.083
2898
+ -0.12907
2899
+ Table B3: Absolute deviations, with estimated α, of rmax from observation, evaluated in (106
2900
+ km).
2901
+ RY K−Obs
2902
+ num
2903
+ RY K−Obs
2904
+ anal
2905
+ Observed rmin
2906
+ rnum
2907
+ min − Obs
2908
+ ranal
2909
+ min − Obs
2910
+ MERCURY
2911
+ 100.062
2912
+ 100.006
2913
+ 46.000
2914
+ 0.028
2915
+ 0.002
2916
+ Venus
2917
+ 99.838
2918
+ 99.994
2919
+ 107.480
2920
+ -0.173
2921
+ -0.005
2922
+ EARTH
2923
+ 99.856
2924
+ 100.003
2925
+ 147.095
2926
+ -0.211
2927
+ 0.004
2928
+ MARS
2929
+ 99.962
2930
+ 99.989
2931
+ 206.650
2932
+ -0.077
2933
+ -0.022
2934
+ JUPITER
2935
+ 99.897
2936
+ 99.996
2937
+ 740.595
2938
+ -0.757
2939
+ -0.027
2940
+ SATURN
2941
+ 99.802
2942
+ 100.001
2943
+ 1357.554
2944
+ -2.684
2945
+ 0.020
2946
+ URANUS
2947
+ 99.905
2948
+ 99.995
2949
+ 2732.696
2950
+ -2.579
2951
+ -0.117
2952
+ Neptune
2953
+ 99.820
2954
+ 100.002
2955
+ 4471.050
2956
+ -8.037
2957
+ 0.107
2958
+ Pluto
2959
+ 100.107
2960
+ 100.002
2961
+ 4434.987
2962
+ 4.753
2963
+ 0.125
2964
+ Table B4: Absolute deviations, with estimated α, of rmin from observation, evaluated in (106
2965
+ km).
2966
+ 27
2967
+
2968
+ References
2969
+ [1] Ephraim Fischbach and Carrick L. Talmadge, The Search for Non-Newtonian Gravity,
2970
+ AIP Press, Springer (1999),
2971
+ [2] L. D. Landau and E. M. Lifshitz, Course of Theoretical Physics (Mechanics), Vols. 1 Ch
2972
+ 3, Sec 14, p 32, (Pergamon Press : Oxford), (1969).
2973
+ [3] I. Rodriguez and J. L. Brun, ”Closed orbits in central forces distinct from Coulomb or
2974
+ harmonic oscillator type,” European Journal of Physics, vol. 19, pp. 41-49, 1998.
2975
+ [4] J. L. Brun and A. F. Pacheco, ”On closed but non-geometrically similar orbits,” Celestial
2976
+ Mech Dyn Astr, pp. 311-316, 2006.
2977
+ [5] K. Hinterbichler, “Theoretical Aspects of Massive Gravity”, Rev. Mod. Phys. 84, 671
2978
+ (2012), arXiv:1105.3735 [hep-th].
2979
+ [6] Claudia de Rham, “Massive Gravity”, Living Reviews in Relativity, 17 (2014) 7,
2980
+ arXiv:1401.4173 [hep-th].
2981
+ [7] A. S. Goldhabert and M. M. Nieto, “Mass of the graviton”, PRD9, 1119 (1974)
2982
+ [8] Yiming Dong, Lijing Shao, Zexin Hu, Xueli Miaoc and Ziming Wang, “Prospects for
2983
+ Constraining the Yukawa Gravity with Pulsars around Sagittarius A*”, JCAP-11-(2022)-
2984
+ 051, arXiv:2210.16130 [astro-ph.HE].
2985
+ [9] J. W. Moffat, ”Scalar-tensor-vector gravity theory,” Journal of Cosmology and Astropar-
2986
+ ticle Physics, 2006.
2987
+ [10] Xing Zhang, Tan Liu and Wen Zhao, “Gravitational radiation from compact binary sys-
2988
+ tems in screened modified gravity”, Phys. Rev.D95, 104027 (2017), arXiv: 1702.08752
2989
+ [gr-qc].
2990
+ [11] A. D'Addio, R. Casadio, A. Giusti, and M. De Laurentis, “Orbits in bootstrapped Newto-
2991
+ nian gravity”, Phys. Rev. D 105, 104010 (2021), arXiv:2110.08379 [gr-qc]
2992
+ [12] R. Della Monica, I. de Martino, M. De Laurentis, “Orbital precession of the S2 star in
2993
+ Scalar-Tensor-Vector Gravity”, Monthly Notices of the Royal Astronomical Society, Volume
+ 510, Issue 4, March 2022, Pages 4757-4766, arXiv:2105.12687 [gr-qc]
2995
+ [13] David Benisty, “Testing modified gravity via Yukawa potential in two body problem:
2996
+ Analytical solution and observational constraints”, Phys. Rev. D106, 043001 (2022).
2997
+ [14] I. Banik and H. Zhao, Mon. Not. Roy. Astron. Soc. 480, 2660 (2018), [Erratum:
2998
+ Mon.Not.Roy.Astron.Soc. 482, 3453 (2019), Erratum: Mon.Not.Roy.Astron.Soc. 484, 1589
2999
+ (2019)], arXiv:1805.12273 [astro-ph.GA].
3000
+ [15] Q. Yu, F. Zhang, and Y. Lu, Astrophys. J. 827, 114 (2016), arXiv:1606.07725 [astro-
3001
+ ph.HE].
3002
+ [16] D. Pricopi, Astrophys. Space Sci. 361, 277 (2016).
3003
+ [17] J. P. Edwards, U. Gerber, C. Schubert, M. A. Trejo, and A.Weber, PTEP 2017, 083A01
3004
+ (2017), arXiv:1706.09979 [physics.atom-ph].
3005
+ 28
3006
+
3007
+ [18] R. Mukherjee and S. Sounda, Indian Journal of Physics 92, 197 (2018), arXiv:1705.02444
3008
+ [physics.plasm-ph].
3009
+ [19] Lorenzo Iorio, “Putting Yukawa-Like Modified Gravity (MOG) on the Test in the Solar
3010
+ System”, Scholarly Research Exchange, Volume 2008, Article ID 238385
3011
+ [20] M. De Laurentis, I. De Martino, and R. Lazkoz, “Analysis of the Yukawa gravitational
3012
+ potential in f(R) gravity II: relativistic periastron advance”, Phys. Rev. D 97, 104068
3013
+ (2018), arXiv:1801.08136 [gr-qc].
3014
+ [21] J. Bergé, P. Brax, M. Pernot-Borràs, and J.-P. Uzan, “Interpretation of geodesy exper-
3015
+ iments in non-Newtonian theories of gravity”, Class. Quant. Grav. 35, 234001 (2018),
3016
+ arXiv:1808.00340 [gr-qc].
3017
+ [22] Lorenzo Iorio, “Constraints on a Yukawa gravitational potential from laser data of LA-
3018
+ GEOS satellites”, Physics Letters A 298 (2002) 315-318
3019
+ [23] E. Cavan, I. Haranas, I. Gkigkitzis and K. Cobbett, ”Dynamics and stability of the two
3020
+ body problem with Yukawa correction,” Astrophysics and Space Science, 2020.
3021
+ [24] ”https://nssdc.gsfc.nasa.gov/planetary/factsheet/index.html,” [Online].
3022
+ [25] A. Rujula, dedicated to viktor weisskopf on the occasion of the viki-fest, erice, 1986.
3023
+ [26] H. Goldstein, Classical Mechanics, 2nd ed., Addison-Wesley Publishing Company, 1980.
3025
+ [27] J. D. Meiss, Differential Dynamical Systems, 2007.
3026
+ [28] O. Fackler and J. T. T. Van, 5th Force Neutrino Physics, 1988.
3027
+ [29] S. L. Ross, Differential Equations (John Wiley & Sons), 1984, p. 661.
3028
+ [30] K. Wakker, Fundamentals of Astrodynamics, 2015.
3029
+ 29
3030
+
XdE0T4oBgHgl3EQfmgEE/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
c9E3T4oBgHgl3EQfeQoX/content/2301.04541v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d1de61abc2d3da952511403efb0140d345beb54f6ebadc0d28ece72377c8e550
3
+ size 129419
dNE2T4oBgHgl3EQfGAZK/content/2301.03652v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b106e593ce087692bc26eb147e42bc8d37568ca23f96899ade65a539284cb168
3
+ size 2145989
dNFRT4oBgHgl3EQfTjeD/content/tmp_files/2301.13533v1.pdf.txt ADDED
@@ -0,0 +1,2769 @@
1
+ arXiv:2301.13533v1 [eess.SY] 31 Jan 2023
2
+ 1
3
+ Passivity-based power sharing and voltage regulation
4
+ in DC microgrids with unactuated buses
5
+ Albertus Johannes Malan, Pol Jané-Soniera, Felix Strehle, and Sören Hohmann
6
+ Abstract—In this paper, we propose a novel four-
7
+ stage distributed controller for a DC microgrid that
8
+ achieves power sharing and average voltage regulation
9
+ for the voltages at actuated and unactuated buses. The
10
+ controller is presented for a DC microgrid compris-
11
+ ing multiple distributed generating units (DGUs) with
12
+ time-varying actuation states; dynamic RLC lines; non-
13
+ linear constant impedance, current and power (ZIP)
14
+ loads and a time-varying network topology. The con-
15
+ troller comprising a nonlinear gain, PI controllers, and
16
+ two dynamic distributed averaging stages is designed
17
+ for asymptotic stability. This constitutes first deriving
18
+ passivity properties for the DC microgrid, along with
19
+ each of the controller subsystems. Thereafter, design
20
+ parameters are found through a passivity-based optim-
21
+ isation using the worst-case subsystem properties. The
22
+ resulting closed-loop is robust against DGU actuation
23
+ changes, network topology changes, and microgrid
24
+ parameter changes. The stability and robustness of the
25
+ proposed control is verified via simulations.
26
+ Index Terms—DC microgrids, distributed control,
27
+ passivity, power sharing, voltage regulation.
28
+ I. Introduction
29
+ T
30
+ HE ADVENT of localised power generation and stor-
31
+ age increasingly challenges the prevailing centralised
32
+ power-generation structures. Originally proposed in [1],
33
+ the microgrids paradigm envisions networks that can oper-
34
+ ate autonomously through advanced control while meeting
35
+ consumer requirements. Although current electrical grids
36
+ predominantly use AC, high and low voltage DC networks
37
+ have been made technically feasible due to the continual
38
+ improvements of power electronics. Indeed, DC microgrids
39
+ exhibit significant advantages over their AC counterparts,
40
+ demonstrating a higher efficiency and power quality while
41
+ simultaneously being simpler to regulate [2], [3].
42
+ In microgrids, power generation and storage units
43
+ are typically grouped into distributed generation units
44
+ (DGUs) which connect to the microgrid through a single
45
+ DC-DC converter for higher efficiency [2]. This changes
46
+ the traditionally centralised regulation problem in power
47
+ grids into a problem of coordinating the DGU connected
48
+ throughout the microgrid. This coordination is generally
49
+ This work was supported in part by Germany’s Federal Ministry
50
+ for Economic Affairs and Climate Action (BMWK) through the
51
+ RegEnZell project (reference number 0350062C). (Corresponding
52
+ author: A. J. Malan.)
53
+ A. J. Malan, P. Jané-Soniera, F. Strehle, and S. Hohmann
54
+ are with the Institute of Control Systems (IRS), Karlsruhe In-
55
+ stitute of Technology (KIT), 76131, Karlsruhe, Germany. Emails:
56
+ albertus.malan@kit.edu, pol.soneira@kit.edu, felix.strehle@kit.edu,
57
+ soeren.hohmann@kit.edu.
58
+ realised as average or global voltage regulation in combina-
59
+ tion with load sharing between the DGUs (see e.g. [4]–[6]).
60
+ Literature Review: A vast number of approaches have
61
+ been proposed for the voltage regulation and load sharing
62
+ of DC microgrids, as detailed in the overview papers [3],
63
+ [7], [8] along with the sources therein. These approaches
64
+ are broadly categorised as either centralised, decentralised
65
+ or distributed in nature [3], [7], [8]. While centralised
66
+ controllers can optimally coordinate the DGUs, they offer
67
+ reduced scalability and flexibility and have a single point
68
+ of failure [8]. On the other hand, decentralised controllers
69
+ either only attempt to achieve voltage stability [9]–[11]
70
+ or achieve load sharing at the cost of voltage regulation
71
+ quality (e.g. the droop-based approaches in [3]).
72
+ In response to these limitations, numerous controllers
73
+ for voltage regulation and load sharing which operate in a
74
+ distributed manner have been proposed [4]–[6], [12]–[20].
75
+ In [4], distributed averaging is employed to find a global
76
+ voltage estimate with which voltage regulation is achieved,
77
+ but the microgrid dynamics are neglected in the stability
78
+ analysis. Distributed averaging with dynamic microgrid
79
+ models is used in [5], [12], although [5] requires LMIs to
80
+ be solved before buses are allowed to connect whereas
81
+ [12] only considers constant current loads. Similarly, a
82
+ sliding-mode controller is proposed in [13] for a dynamic
83
+ microgrid with constant current loads. On the other hand,
84
+ [14] proposes a cyberattack-resilient controller for a mi-
85
+ crogrid with constant conductance loads and resistive
86
+ lines. A consensus-based distributed controller with event-
87
+ triggered communication is presented in [15]. Consensus-
88
+ based controllers are also utilised in [6], [16], [17], where
89
+ [6] uses a consensus-based integral layer on top of a droop-
90
+ based controller. Finally, while many contributions strive
91
+ to achieve proportional current sharing [4]–[6], [12]–[17],
92
+ [20], nonlinear controllers that achieve proportional power
93
+ sharing have also been proposed in [18], [19].
94
+ While the literature listed above differ greatly in their
95
+ approaches, we note a commonality in their omission of
96
+ buses without actuation. This omission is typically mo-
97
+ tivated either by considering a microgrid comprising only
98
+ actuated DGU buses [4], [5], [16], [17], or by eliminating the
99
+ unactuated buses with the Kron-reduction [6], [12]–[15],
100
+ [18]–[20]. However, considering a network comprising only
101
+ actuated buses severely limits the flexibility of a microgrid,
102
+ since each bus must be able to supply or consume enough
103
+ power at all times. On the other hand, the Kron-reduction
104
+ requires loads to be described as positive conductances
105
+ (see e.g. [21]). While research into Kron-reduced networks
106
+ with negative loads is ongoing (see e.g. [22]), the general
107
+
108
+ 2
109
+ inclusion of negative loads, e.g. non-controllable power
110
+ sources, in Kron-reducible networks remains out of reach
111
+ at present. Furthermore, consider the case where a DGU
112
+ can no longer supply or consume the required amount of
113
+ power, e.g. a fully charged or discharged battery storage.
114
+ Such a DGU then loses the ability to regulate itself and
115
+ fully support the grid. In the approaches considered above
116
+ [4]–[6], [12]–[20], such a DGU is forced to disconnect from
117
+ the microgrid and its local measurements are discarded.
118
+ For DGUs with intermittent power sources, this could
119
+ result in significant swings in the number of controlled and
120
+ observed buses in the microgrid.
121
+ Main Contribution:
122
+ In this paper, we consider a
123
+ DC microgrid as a physically interconnected multi-agent
124
+ system. Extending our work in [23]1, we propose a four-
125
+ stage controller that achieves voltage regulation and power
126
+ sharing in a DC microgrid with actuated and unactuated
127
+ buses in a distributed manner. The four-stage controller
128
+ comprises a nonlinear weighting function, two dynamic
129
+ distributed averaging (DDA) stages and a proportional-
130
+ integral (PI) controller. The asymptotic stability of the
131
+ closed loop comprising the DC microgrid and the four-
132
+ stage controller interconnected in feedback is proven by
133
+ means of passivity theory. In detail, the contributions
134
+ comprise:
135
+ 1) A four-stage distributed controller for DC microgrids
136
+ which achieves consensus on the weighted average
137
+ voltage error of actuated and unactuated buses and
138
+ assures coordination through power sharing at the
139
+ actuated buses.
140
+ 2) A nonlinear weighting function that penalises voltage
141
+ errors outside a given tolerance band more strongly
142
+ than those within.
143
+ 3) Passivity classifications for each of the constitutive
144
+ microgrid subsystems (DGUs, loads, and lines) and
145
+ for each of the controller stages (weighting function,
146
+ DDA, and PI).
147
+ 4) A
148
+ method
149
+ for
150
+ calculating
151
+ the
152
+ input-feedforward
153
+ output-feedback passive (IF-OFP) indices of the non-
154
+ linear power-controlled DGUs through optimisation.
155
+ 5) An IF-OFP formulation for the DC microgrid with
156
+ a supply rate that is independent of the network
157
+ topology, the number of buses and their states of
158
+ actuation.
159
+ 6) A passivity-based stability analysis for the equilib-
160
+ rium of the DC microgrid connected in feedback with
161
+ the four-stage controller.
162
+ In addition to the contributions listed above, we also
163
+ contribute a theoretical result comprising a formalisation
164
+ of the obstacle presented by cascaded input-feedforward
165
+ passive (IFP) and output-feedback passive (OFP) systems
166
+ in the analysis of dissipative systems. This theoretical
167
+ 1The controller proposed in [23] is extended by weighing the
168
+ error with a nonlinear function. Moreover, in addition to applying
169
+ the controller to a DC microgrid context, we here propose a new
170
+ dissipativity-based analysis that investigates the closed loop stability
171
+ analytically as opposed to the numerical results in [23].
172
+ contribution informs and motivates parameter choices for
173
+ the four-stage controller in Contribution 1.
174
+ We highlight that the proposed controller can achieve
175
+ exact voltage regulation and power sharing with the
176
+ stability verified with the eigenvalues of the linearised
177
+ system. Moreover, by employing leaky PI controllers, we
178
+ demonstrate a passivity-based stability analysis that is
179
+ independent of and robust against changes in the commu-
180
+ nication topology, changes in the electrical topology, load
181
+ changes, changes in the actuation status of DGUs, uncer-
182
+ tainties in component parameters, and buses connecting
183
+ or disconnecting.
184
+ Paper Organisation: The introduction concludes with
185
+ some notation and preliminaries on graph theory. In
186
+ Section II, we recall and introduce results relating to
187
+ dissipativity theory. Next, in Section III, the problem is
188
+ modelled and objectives for the steady state are formal-
189
+ ised. In Section IV, a four-stage control structure is intro-
190
+ duced that fulfils objectives from Section III. Thereafter,
191
+ the passivity properties of the constituent subsystems are
192
+ investigated in Section V and the controller is designed
193
+ for asymptotic stability of the closed loop in Section VI.
194
+ Finally, in Section VII, a simulation is used to verify the
195
+ asymptotic stability and robustness of the closed loop.
196
+ Concluding remarks are provided in Section VIII.
197
+ Notation and Preliminaries: Define as a vector a =
198
+ (ak) and a matrix A = (akl). 1k is a k-dimensional vector
199
+ of ones and Ik is the identity matrix of dimension k.
200
+ Diag[·] creates a (block-)diagonal matrix from the supplied
201
+ vectors (or matrices). The upper and lower limits of a value
202
+ a are given by a and a. For a variable x, we denote its
203
+ unknown steady state as ˆx, its error state as ˜x := x − ˆx,
204
+ and a desired setpoint as x∗. Whenever clear from context,
205
+ we omit the time dependence of variables.
206
+ We denote by G = (N, E) a finite, weighted, undirected
207
+ graph with vertices N and edges E ⊆ N × N. Let |N| be
208
+ the cardinality of the set N. Let L be the Laplacian matrix
209
+ of G. By arbitrarily assigning directions to each edge in E,
210
+ the incidence matrix E ∈ R|N|×|E| of G is defined by
211
+ e_{kl} = \begin{cases} +1, & \text{if vertex } k \text{ is the sink of edge } l, \\ -1, & \text{if vertex } k \text{ is the source of edge } l, \\ 0, & \text{otherwise.} \end{cases}
221
+ (1)
222
+ II. Dissipativity Preliminaries
223
+ We here recall and introduce preliminaries of dissip-
224
+ ativity theory for nonlinear systems. In Section II-A we
225
+ provide definitions relating to dissipativity and passiv-
226
+ ity theory. Thereafter in Section II-B, we investigate
227
+ the passivity properties of static functions. Finally, in
228
+ Section II-C, we recall a result on the interconnection
229
+ of dissipative systems with quadratic supply rates and
230
+ formalise a new result on the limitations of such an
231
+ interconnection.
232
+
233
+ 3
234
+ A. Dissipative Systems
235
+ Consider a nonlinear system
236
+
237
+ ˙x = f(x, u),
238
+ y = h(x),
239
+ (2)
240
+ where x ∈ Rn, u ∈ Rm, y ∈ Rm and where f : Rn×Rm →
241
+ Rn and h : Rn × Rm → Rm are class C1 functions.
242
+ Definition 1 (Dissipative system, c.f. [24]–[26]). A system
243
+ (2) with a class C1 storage function S : Rn × Rm → R+ is
244
+ dissipative w.r.t. a supply rate w(u, y) if ˙S ≤ w(u, y).
245
+ Definition 2 (Quadratic supply rates, c.f. [24]–[26]). A
246
+ system (2) that is dissipative w.r.t. w(u, y) is
247
+ • passive if w = uTy,
248
+ • input-feedforward passive (IFP) if w = uTy − νuT u,
249
+ • output-feedback passive (OFP) if w = uT y − ρyT y,
250
+ • input-feedforward output-feedback passive (IF-OFP) if
251
+ w = (1 + νρ)uT y − νuT u − ρyT y,
252
+ • has an L2-gain of γL2 if w = γ2
253
+ L2uTu − yT y,
254
+ where γL2 > 0 and ν, ρ ∈ R.
255
+ Definition 3 (Zero-state observable (ZSO) [24, p. 46]). A
256
+ system (2) is ZSO if u ≡ 0 and y ≡ 0 implies x ≡ 0.
257
+ For cases where the desired equilibrium of a system is
258
+ not at the origin but at some constant value, the shifted
259
+ passivity [24, p. 96] or equilibrium-independent passivity
260
+ (EIP) [27] of a system must be investigated. Naturally, this
261
+ requires that an equilibrium exists, i.e. there is a unique
262
+ input ˆu ∈ Rm for every equilibrium ˆx ∈ ˆ
263
+ X ⊂ Rn such that
264
+ (2) produces f(ˆx, ˆu) = 0 and ˆy = h(ˆx, ˆu) [28, p. 24].
265
+ Definition 4 (EIP [28, p. 24]). A system (2) is EIP
266
+ if there exists a class C1 storage function S(x, ˆx, u),
267
+ S : Rn × ˆ
268
+ X × Rm → R+, with S(ˆx, ˆx, ˆu) = 0, that is dis-
269
+ sipative w.r.t. w(u − ˆu, y − ˆy) for any equilibrium (ˆu, ˆy).
270
+ B. Passive Static Functions
271
+ Recall that a sector-bounded static nonlinear function
272
+ is dissipative to a supply rate defined by the sector bound
273
+ [26, Def. 6.2]. We now consider the arbitrarily shifted
274
+ single-input single-output function
275
+
276
+ y = h(u),
277
+ u, ˆu ∈ U,
278
+ y, ˆy ∈ Y,
279
+ h : U → Y,
280
+ ˜y = ˜h(˜u) := h(u) − h(ˆu) = y − ˆy,
281
+ ˜u := u − ˆu
282
+ (3)
283
+ and show how its dissipativity properties may be derived.
284
+ Proposition 5 (EIP static functions). A static function
285
+ (3) of class C0 is IF-OFP(c, 1/c) w.r.t. the arbitrarily
286
+ shifted input-output pair (˜u, ˜y) if
287
+ c ≤ dh(u)
288
+ du
289
+ ≤ c,
290
+ ∀u ∈ U.
291
+ (4)
292
+ and 0 < c < ∞.
293
+ Proof. Consider for (3) the slope between an arbitrary
294
+ shift (ˆu, ˆy) ∈ U ×Y and a point (u, y), for which the upper
295
+ and lower bounds are given by
296
+ c ≤ y − ˆy
297
+ u − ˆu ≤ c,
298
+ ∀(u, y), (ˆu, ˆy) ∈ U × Y.
299
+ (5)
300
+ Changing to the shifted variables ˜u and ˜y as in (5) and
301
+ multiplying through by ˜u2 yields
302
+ c˜u2 ≤ ˜u˜y ≤ c˜u2 ⇐⇒ (˜y − c˜u)(˜y − c˜u) ≤ 0
303
+ ⇐⇒ (˜y − c˜u)(1
304
+ c ˜y − ˜u) ≤ 0,
305
+ (6)
306
+ for c > 0, which describes an IF-OFP function (see [26,
307
+ p. 231]). Finally, through the mean value theorem, the
308
+ bounds in (5) may be found from (4).
309
+
310
+ We note that the restrictions on c in Prop. 5 are needed
311
+ from a computational point of view (c < ∞) and to ensure
312
+ that the passivity indices correspond to the correct sector2
313
+ (c > 0). However, this limits the passivity properties
314
+ attainable through Prop. 5 to ρ = 1/c > 0.
315
+ Remark 1 (Symmetrical sectors). Placing the additional
316
+ restriction c = −c in (4) results in the Lipschitz continuity
317
+ of h(u). Moreover, this implies that the arbitrarily shifted
318
+ function ˜h(˜u) has a finite L2-gain of c [29].
319
+ C. Interconnected Quadratic Dissipative Systems
320
+ Building upon the results on the interconnection of
321
+ dissipative systems in [28], [30], we now provide a method
322
+ for finding dissipativity properties for a subset of the inter-
323
+ connected subsystems such that interconnected stability is
324
+ guaranteed. Specifically, we look for the dissipative supply
325
+ rates that restrict the subset of subsystems as little as pos-
326
+ sible. For a set S of subsystems, define u = [uT
327
+ 1 , . . . , uT
328
+ |S|]T
329
+ and y = [yT
330
+ 1 , . . . , yT
331
+ |S|]T .
332
+ Theorem 6 (Minimally restrictive stabilising indices).
333
+ Consider |S| subsystems of the form (2) which are dissipat-
334
+ ive w.r.t. the supply rates wi = 2σiuT
335
+ i yi−νiuT
336
+ i ui−ρiyT
337
+ i yi
338
+ and are linearly interconnected according to u = Hy. The
339
+ stability of the interconnected system is guaranteed if there
340
+ exists a D and νj, ρj ∈ R with j ∈ J such that
341
+ \min_{D,\; \nu_j, \rho_j,\; j \in J} \;\; \sum_{j \in J} (\nu_j + \rho_j)
+ \quad \text{s.t.} \quad \sigma_j = \tfrac{1}{2}(1 + \nu_j \rho_j), \; j \in J, \qquad Q \preceq 0, \qquad D^2 \succ 0 \qquad (7)
+ where the subsystems with configurable supply rates are
+ represented by the set J ⊂ S, and
+ Q := \begin{bmatrix} H \\ I \end{bmatrix}^T D W D \begin{bmatrix} H \\ I \end{bmatrix} \qquad (8)
+ D := \mathrm{Diag}[d^T, d^T], \quad d = (\sqrt{d_i}), \qquad (9)
+ W := \begin{bmatrix} -\mathrm{Diag}[\nu_i] & \mathrm{Diag}[\sigma_i] \\ \mathrm{Diag}[\sigma_i] & -\mathrm{Diag}[\rho_i] \end{bmatrix}, \quad i \in S.
377
+ (10)
378
+ 2Consider e.g. the sector Prop. 5 would yield if c ≤ c < 0.
379
+
380
+ 4
381
+ The proof for Theorem 6 follows analogously to the
382
+ proof of [29, Theorem 13] with application of [29, Re-
383
+ mark 5] and is thus omitted for brevity. Note that if J = ∅
384
+ in (7), Theorem 6 can be used to verify the stability of
385
+ interconnected dissipative systems.
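The J = ∅ verification case of Theorem 6 amounts to a few lines of linear algebra. The sketch below assembles Q from (8)-(10) for a given interconnection and candidate indices and checks its sign; the two-subsystem feedback example and all numerical values are illustrative assumptions, not the paper's setup.

import numpy as np

def interconnection_Q(H, d, nu, rho, sigma):
    """Assemble Q from (8)-(10) so the sign condition of Theorem 6 can be
    checked for fixed indices (the J = empty, pure-verification case)."""
    D = np.diag(np.concatenate([np.sqrt(d), np.sqrt(d)]))      # (9)
    W = np.block([[-np.diag(nu), np.diag(sigma)],
                  [np.diag(sigma), -np.diag(rho)]])            # (10)
    HI = np.vstack([H, np.eye(H.shape[1])])
    return HI.T @ D @ W @ D @ HI                               # (8)

# Illustrative two-subsystem negative feedback: u1 = -y2, u2 = y1
H = np.array([[0.0, -1.0],
              [1.0,  0.0]])
d = np.array([1.0, 1.0])                 # candidate storage weights
nu = np.array([0.1, 0.0])                # subsystem 1: IFP(0.1)
rho = np.array([0.0, 0.2])               # subsystem 2: OFP(0.2)
sigma = 0.5 * (1.0 + nu * rho)
Q = interconnection_Q(H, d, nu, rho, sigma)
print("max eigenvalue of Q:", np.linalg.eigvalsh(Q).max())   # <= 0 indicates stability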
386
+ Despite the design flexibility provided by Theorem 6,
387
+ certain cascade configurations present obstacles to the ap-
388
+ plication of dissipativity theory. The following proposition
389
+ formalises the problem presented by one such configura-
390
+ tion which arises in the sequel and is used to inform the
391
+ control design.
392
+ Proposition 7 (Non-dissipativity of cascaded IFP-OFP
393
+ systems). Consider |S| ≥ 2 subsystems (2) which are
394
+ dissipative w.r.t. wi = 2σiuT
395
+ i yi − νiuT
396
+ i ui − ρiyT
397
+ i yi and
398
+ linearly interconnected according to u = Hy. Let i = 1 and
399
+ i = 2 arbitrarily denote subsystems that are IFP and OFP,
400
+ respectively. If these systems are connected in exclusive
401
+ casade and do not form a feedback connection, i.e.
402
+ H =
403
+
404
+
405
+ 0
406
+ 0
407
+
408
+ 1
409
+ 0
410
+ 0
411
+ 0
412
+
413
+
414
+
415
+  ,
416
+ (11)
417
+ then investigating stability via separable storage functions
418
+ as in Theorem 6 fails.
419
+ Proof. Evaluating the stability criteria in (7) under the
420
+ imposed IFP and OFP conditions yields the Q (8) entries
421
+ q11 = d1ρ1 + d2ν2 = 0,
422
+ q12 = q21 = d2σ2
423
+ 2
424
+ = d2
425
+ 2 . (12)
426
+ Since di > 0, Q constitutes an indefinite saddle-point mat-
427
+ rix [31, Section 3.4], violating the requirement in (7).
428
+
429
+ Remark
430
+ 2
431
+ (Non-separable
432
+ storage
433
+ functions).
434
+ The
435
+ obstacle in Prop. 7 arises due to the storage functions being
436
+ compartmentalised by the subsystem boundaries. While the
437
+ separability of storage functions is a central motivation for
438
+ the use of dissipativity theory, forgoing this allows for a
439
+ stability analysis through less conservative methods (e.g.
440
+ the KYP lemma).
441
+ III. Problem Description
442
+ In this section, the components comprising the DC mir-
443
+ crogrid are introduced in Section III-A. This is followed by
444
+ Section III-B, where controllers are added which regulate
445
+ the output power of actuated buses in order to facilitate
446
+ power sharing in the sequel. Finally, we formulate the
447
+ coordination and cooperation goals as a control problem
448
+ in Section III-C.
449
+ A. DC Network
450
+ We consider a DC microgrid comprising N = |N| buses
451
+ connected by via π-model electrical lines, as depicted in
452
+ Fig. 1. Let the graph GP = (N, EP) describe the intercon-
453
+ nection with N as the set of buses and EP as the set of
454
+ lines. Without loss of generalisation, we allow each node to
455
+ inject power through a DC-DC buck converter connected
456
+ via a lossy LC-filter. Note that a time-averaged model (see
457
+ e.g. [12]) is used for the buck converter and the energy
458
+ source is assumed to be ideal but finite.
459
+ Let the buses be split into an actuated set Nα and
460
+ an unactuated set Nβ, according to whether the buck
461
+ converter can freely regulate the amount of power injected
462
+ at a given time. Buses may freely switch between the sets
463
+ Nα and Nβ, but Nα ∩ Nβ = ∅ and Nα ∪ Nβ = N always
464
+ hold. To characterise this actuation state of a bus, define
465
+ the piecewise-constant, time-varying actuation parameter
466
+ αk(t) as
467
+ αk(t) :=
468
+ � 1,
469
+ k ∈ Nα,
470
+ 0,
471
+ k ∈ Nβ.
472
+ (13)
473
+ Note that we omit the time dependence of αk in the sequel.
474
+ The dynamics for actuated buses with DGUs, where
475
+ αk = 1 with k ∈ Nα are described by
476
+
477
+ Lk˙ik
478
+ Ceq,k ˙vk
479
+
480
+ =
481
+ �−Rk
482
+ −1
483
+ 1
484
+ 0
485
+ ��ik
486
+ vk
487
+
488
+ +
489
+
490
+ vs,k
491
+ −eT
492
+ P,kit − IL,k(vk)
493
+
494
+ (14)
495
+ where Ceq,k = Ck + 1/2eT
496
+ P,k Diag[Ckl]eP,k; Ck, Ckl, Lk > 0;
497
+ ik ∈ R; and vk ∈ R+. The line currents it connect to the
498
+ capacitor voltages according to incidence matrix EP =
499
+ (eT
500
+ P,k) of GP. The dynamics of the unactuated load buses
501
+ with αk = 0 correspond to the simplified system
502
+ C_{eq,k}\,\dot{v}_k = -e_{P,k}^T i_t - I_{L,k}(v_k), \quad k \in N_\beta
505
+ (15)
506
+ In both the actuated (14) and unactuated (15) cases, the
507
+ loads are considered static, nonlinear voltage-dependent
508
+ current sources which are described by class C0 functions.
509
+ In this work, we utilise the standard ZIP-model comprising
510
+ constant impedance, constant current and constant power
511
+ parts. Note that other continuous functions may also be
512
+ used without restriction3. As described in [33, pp. 110–
513
+ 112], we define a critical voltage vcrit, typically set to
514
+ 0.7vRef, below which the loads are purely resistive. Thus,
515
+ I_{L,k}(v_k) = \begin{cases} Z_k^{-1} v_k + I_k + \dfrac{P_k}{v_k}, & v_k \ge v_{crit}, \\ Z_{crit,k}^{-1}\, v_k, & v_k < v_{crit}, \end{cases} \qquad (16)
+ Z_{crit,k}^{-1} := \dfrac{I_{L,k}(v_{crit})}{v_{crit}} = Z_k^{-1} + \dfrac{I_k}{v_{crit}} + \dfrac{P_k}{v_{crit}^2},
540
+ (17)
541
+ describes a static, nonlinear load which conforms to (3).
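A direct implementation of the ZIP law (16)-(17) is straightforward. The sketch below is a minimal Python version; the grid voltage, Z, I, P values and the critical-voltage choice are illustrative placeholders, not parameters from the paper.

import numpy as np

def zip_load_current(v, Z, I, P, v_crit):
    """ZIP load with the low-voltage resistive continuation of (16)-(17):
    below v_crit the load is served by the equivalent conductance (17)."""
    z_crit_inv = 1.0 / Z + I / v_crit + P / v_crit**2      # (17)
    v = np.asarray(v, dtype=float)
    return np.where(v >= v_crit,
                    v / Z + I + P / v,                      # ZIP branch of (16)
                    z_crit_inv * v)                         # resistive branch

# Illustrative parameters: 380 V bus, v_crit = 0.7 * 380 V
print(zip_load_current([380.0, 200.0], Z=15.0, I=1.5, P=800.0, v_crit=0.7 * 380.0))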
542
+ Lastly, the π-model transmission lines physically con-
543
+ necting the nodes are governed by the dynamics
544
+ L_{kl}\,\dot{i}_{t,kl} = -R_{kl}\, i_{t,kl} + e_{P,kl}^T v, \quad kl \in E_P,
547
+ (18)
548
+ where it,kl ∈ R, Lkl, Rkl > 0 and (eT
549
+ P,kl)T = EP. Note
550
+ that the line capacitances are included in the equivalent
551
+ capacitances Ceq,k at the buses.
552
+ B. DGU Power Regulator
553
+ To allow for power sharing between the actuated buses
554
+ (14) in the sequel, we equip each DGU with a controller
555
+ 3This includes exponential loads (see e.g. [32]).
556
+
557
+ 5
558
582
+ 2
583
+ Busl
584
+ Figure 1: Circuit diagram of a bus comprising a DC-DC buck converter, a filter, and a current source representing a
585
+ load, connected to a π-model line (blue); the line capacitances considered to be part of the respective buses.
586
+ that can regulate the injected power to a desired setpoint
587
+ p∗
588
+ k. This regulator has the form
589
+ \dot{e}_{d,k} = \alpha_k (p_k^* - p_k),
+ v_{s,k} = k_d^P (p_k^* - p_k) + k_d^I e_{d,k} + \tilde{R}_k i_k + v_{Ref}
595
+ (19)
596
+ where ed ∈ R, pk = vkik is the actual power injected,
597
+ ˜R ∈ R is the damping added to the system, and kP
598
+ d , kI
599
+ d >
600
+ 0 are the control parameters. Combining (19) with (14)
601
+ yields the nonlinear system describing the actuated agents
602
+ k ∈ Nα
603
+
604
+
605
+ ˙ed,k
606
+ Lk˙ik
607
+ Ck ˙vk
608
+
609
+ =
610
+
611
+
612
+ 0
613
+ −vk
614
+ 0
615
+ kI
616
+ d
617
+ ˜Rk − Rk − kP
618
+ d vk
619
+ −1
620
+ 0
621
+ 1
622
+ 0
623
+
624
+
625
+
626
+
627
+ ed,k
628
+ ik
629
+ vk
630
+
631
+
632
+ +
633
+
634
+
635
+ p∗
636
+ k
637
+ kP
638
+ d p∗
639
+ k + vRef
640
+ −eT
641
+ k it − IL,k(vk)
642
+
643
+
644
+ (20)
645
+ Remark 3 (Regulating current or voltage). Without in-
646
+ validating the stability analysis in the sequel, the regulator
647
+ in (19) can be exchanged for simpler, purely linear current
648
+ or voltage regulators (see e.g. [9]–[11]).
649
+ Remark 4 (Constrained DGU operation). If an actuated
650
+ DGU cannot provide the desired power p∗
651
+ k, e.g. due to
652
+ current, storage or temperature limitations, the DGU may
653
+ simply set its actuation state αk = 0 to disable its control. If
654
+ some power can still be supplied, it may simply be regarded
655
+ as a negative load. This allows DGUs to contribute to the
656
+ power supply of the network, even in the face of control
657
+ limitations.
658
+ C. Control Problem
659
+ A central requirement for DC microgrids is voltage
660
+ stability, which requires the bus voltages to remain within
661
+ a given tolerance band around the reference vRef. Spe-
662
+ cifically, this requirement should be met throughout the
663
+ network, and not only at the actuated buses. Due to
664
+ the presence of lossy lines, power flows are associated
665
+ with voltage differences between buses, meaning that
666
+ vk → vRef, ∀k ∈ N is not practical. Ideally, the voltages at
667
+ all buses should be arrayed in the tolerance band around
668
+ vRef and be as close to vRef as possible4. The manipulated
669
+ variables used to achieve this are the power setpoints p∗
670
+ k
671
+ supplied to the actuated DGUs (19). This leads to the
672
+ first objective for the control of the DC microgrid, which
673
+ involves finding the setpoints p∗
674
+ k that ensure the weighted
675
+ average voltage equals vRef at steady state.
676
+ Objective 1 (Weighted voltage consensus).
677
+ Find p∗
678
+ k s.t. lim
679
+ t→∞
680
+ 1
681
+ N
682
+
683
+ k∈N
684
+ h(vk(t)) = vRef
685
+ (21)
686
+ for a strictly increasing weighting function h : R → R.
687
+ By choosing a nonlinear h, large voltage errors may be
688
+ weighed more strongly. This allows for better utilisation of
689
+ the tolerance band since bus voltages can be further from
690
+ vRef before registering as a significant error.
691
+ In addition to Objective 1, it is desired that all actuated
692
+ DGUs contribute towards supplying and stabilising this
693
+ network. Ensuring that all DGUs receive the same setpoint
694
+ spreads the load across actuated buses, leading to a reduc-
695
+ tion in localised stress on the DGUs. We thus formulate
696
+ the second objective as requiring uniform setpoints for the
697
+ DGUs in steady state.
698
+ Objective 2 (Cooperative power sharing).
699
+ lim
700
+ t→∞(p∗
701
+ k(t) − p∗
702
+ l (t)) = 0,
703
+ ∀ k, l ∈ N
704
+ (22)
705
+ Achieving Objectives 1 and 2 thus yields a controlled
706
+ microgrid where the average weighted voltage error of all
707
+ buses tends to zero through the coordinated action of the
708
+ actuated buses in a distributed fashion. These objectives
709
+ also allow DGUs to transition seamlessly between actuated
710
+ and unactated states and ensure no measurement inform-
711
+ ation is discarded simply because a bus cannot regulate
712
+ itself. Notice that disregarding the unactuated buses in
713
+ Objectives 1 and 2 yields the objectives typically used in
714
+ the literature [4], [6], [12]–[14], [16], [17], [20].
715
+ To achieve these objectives, we make the following
716
+ assumptions related to appropriate network design.
717
+ Assumption 1 (Feasible network). The available power
718
+ sources can feasibly supply the loads with power over the
719
+ 4The magnitude of the errors vRef − vk strongly depend on the
720
+ loads and line resistance. Small errors therefore presuppose adequate
721
+ network design.
722
+
723
+ 6
724
+ hw
725
+ hw
726
+ DDA2,1
727
+ DDA2,N
728
+ PI1
729
+ PIN
730
+ DDA4,1
731
+ DDA4,N
732
+ DC MG
733
+ Stage 1
734
+ Stage 2
735
+ Stage 3
736
+ Stage 4
737
+ uw
738
+ uw,1
739
+ uw,N
740
+ yw,1
741
+ yw,N
742
+ ya,2,1
743
+ ya,2,N
744
+ yc,1
745
+ yc,N
746
+ ya,4,1
747
+ ya,4,N
748
+ p∗
749
+ v
750
+
751
+ vRef1N
752
+ +
753
+ Figure 2: Distributed four-stage control connected in feed-
754
+ back to the microgrid and with indicated communication
755
+ links
756
+ between the local control structures.
757
+ given electrical network, i.e. a suitable equilibrium for the
758
+ microgrid exists.
759
+ Assumption 2 (Number of actuated DGUs). At least one
760
+ DGU is actuated at any given time, i.e. Nα ̸= ∅.
761
+ Assumption 3 (Connected topologies). Objectives 1
762
+ and 2 only apply to a subset of buses electrically connected
763
+ as per GP. Moreover, for a distributed control, a connected
764
+ communication graph exclusively interconnects the same
765
+ subset of buses.
766
+ Note that Assumption 1 is typically made implicitly or
767
+ explicitly in the literature (see e.g. the discussion in [16]).
768
+ Assumptions 2 and 3 further specify requirements that
769
+ allow a distributed control to achieve the feasible state
770
+ in Assumption 1, i.e. by ensuring that at least one source
771
+ of stabilisation is present in the network (Assumption 3),
772
+ and by ensuring that the coordination corresponds to the
773
+ network to be controlled (Remark 5).
774
+ Remark 5 (Proportional power sharing). By normalising
775
+ the power setpoint p∗
776
+ k and weighing the input in (19)
777
+ according to the rated power of a given DGU, Objective 2
778
+ automatically describes a proportional power sharing. With
779
+ reference to Remark 4, this also allows the constrained
780
+ DGUs to lower their maximum injectable power instead
781
+ of setting the DGUs to the unactuated state αk = 0. We
782
+ omit the extension to proportional power sharing in this
783
+ work for simplicity.
784
+ IV. Control Structure
785
+ To meet Objectives 1 and 2, we propose the four-
786
+ stage control structure depicted in Fig. 2. This control
787
+ structure comprises two DDA implementations separated
788
+ by agent PI controllers local to the buses as in [23]. This
789
+ is prepended by a nonlinear weighting function hw. In the
790
+ Sections IV-A, IV-B and IV-C, we successively introduce
791
+ these respective subsystems. Finally in Section IV-D, we
792
+ show that the control structure meets the objectives.
793
+ A. DDA Controller
794
+ Consider the communiation graph GC = (N, EC) linking
795
+ the buses of the DC microgrid. The communication graph
796
+ comprises the same vertices as the physical interconnection
797
+ graph GP but possibly with a different topology. Let LC
798
+ denote the Laplacian of GC. For Stages 2 and 4 of the
799
+ control structure, each agent implements an instance of
800
+ the DDA5 described in [34]. The instances of the respective
801
+ stages may be combined into vector form as
802
+ DDAs
803
+
804
+
805
+
806
+
807
+
808
+ � ˙xa,s
809
+ ˙za,s
810
+
811
+ =
812
+
813
+ −γaIN −LC,P
814
+ LT
815
+ C,I
816
+ −LC,I
817
+ 0
818
+ ��
819
+ xa,s
820
+ za,s
821
+
822
+ +
823
+
824
+ γaIN
825
+ 0
826
+
827
+ ua,s,
828
+ ya,s = xa,s,
829
+ (23)
830
+ where s
831
+
832
+ {2, 4} denotes the stage in Fig. 2, and
833
+ xa,s, za,s ∈ RN are the consensus and integral states
834
+ respectively. Furthermore, γa > 0 is a global estimator
835
+ parameter (see [34]), and LC,I = kI
836
+ aLC and LC,P = kP
837
+ a LC
838
+ are Laplacian matrices weighted for the integral and pro-
839
+ portional responses, respectively. Recall from [34] that a
840
+ constant input u_{a,s} yields
+ \lim_{t\to\infty} y_{a,s,k} = \frac{u_{a,s}^T \mathbf{1}_N}{N}, \quad \forall k.
847
+ (24)
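The averaging property (24) is easy to reproduce numerically. The sketch below integrates one DDA stage of the form (23) for a constant input over a small path-graph communication topology; the gains, step size and graph are illustrative assumptions rather than the paper's tuning.

import numpy as np

def dda_converged_output(u, L_comm, kP=1.0, kI=1.0, gamma=1.0, dt=1e-3, steps=20000):
    """Simulate the PI dynamic distributed averaging stage (23) for a constant
    input and return the consensus states, which should approach mean(u) as
    predicted by (24)."""
    N = len(u)
    x = np.zeros(N)                      # consensus states x_a
    z = np.zeros(N)                      # integral states z_a
    LP, LI = kP * L_comm, kI * L_comm
    for _ in range(steps):
        dx = -gamma * x - LP @ x + LI.T @ z + gamma * u
        dz = -LI @ x
        x, z = x + dt * dx, z + dt * dz
    return x

# Path graph over 4 agents: Laplacian of the communication topology
L_comm = np.array([[ 1, -1,  0,  0],
                   [-1,  2, -1,  0],
                   [ 0, -1,  2, -1],
                   [ 0,  0, -1,  1]], dtype=float)
u = np.array([1.0, 3.0, -2.0, 6.0])
print(dda_converged_output(u, L_comm), "expected:", u.mean())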
848
+ B. Agent PI Controller
849
+ In Stage 3, we equip each bus k ∈ N with a leaky agent
850
+ PI controller similar to the approach in [35]
851
+ PI_k: \quad \dot{x}_{c,k} = -\zeta_c x_{c,k} + u_{c,k}, \qquad y_{c,k} = k_c^I x_{c,k} + k_c^P u_{c,k},
857
+ (25)
858
+ where xc,k ∈ R, ζc ≥ 0 and kP
859
+ c , kI
860
+ c > 0. Note that ζc = 0
861
+ reduces (25) to an ideal PI controller. The combined form
862
+ of the N agent controllers is
863
+ \dot{x}_c = -\zeta_c x_c + u_c, \qquad y_c = k_c^I x_c + k_c^P u_c
867
+ (26)
868
+ Remark 6 (Non-ideal integrators). As shown in the
869
+ sequel, ideal PI controllers only exhibit an IFP property,
870
+ whereas the DDA controller is OFP. The interconnection
871
+ in Fig. 2 thus yields a cascaded IFP-OFP structure which
872
+ obstructs the dissipativity analysis (see Prop. 7). The use
873
+ of leaky integrators (ζc > 0) overcomes this obstacle at the
874
+ cost of negatively affecting the steady-state properties, since
875
+ (25) forces the equilibrium
876
+ uc = ζcxc
877
+ (27)
878
+ instead of uc = 0. In the context of Fig. 2, this corresponds
879
+ to an unwanted steady-state offset for the average weighted
880
+ voltage error.
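A discrete-time view of the leaky agent PI controller (25) makes the trade-off of Remark 6 tangible: the sketch below steps one controller forward in time and shows the state settling at the offset equilibrium (27). Gains, leak rate and step size are illustrative placeholders.

def leaky_pi_step(x_c, u_c, kP=0.5, kI=2.0, zeta=0.05, dt=1e-3):
    """Forward-Euler step of one leaky agent PI controller (25).
    zeta = 0 recovers the ideal PI; zeta > 0 trades a small steady-state
    offset (27) for the OFP-compatible structure discussed in Remark 6."""
    y_c = kI * x_c + kP * u_c
    x_c = x_c + dt * (-zeta * x_c + u_c)
    return x_c, y_c

# With a constant input the state settles near u_c / zeta, reproducing (27)
x = 0.0
for _ in range(200_000):
    x, y = leaky_pi_step(x, u_c=0.01)
print(x, "approx u_c / zeta =", 0.01 / 0.05)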
881
+ Remark 7 (Agent PI controller anti-windup). To prevent
882
+ controller windup, the input to the PI control in (25) should
883
+ be zeroed for any unactuated agents that are disconnected
884
+ from the communication network.
885
+ Remark
886
+ 8
887
+ (Non-participating agents). Implementing
888
+ (25) at each bus k ∈ N allows for a faster reaction to
889
+ disturbances at the cost of controller redundancy. By setting
890
+ ua,4,m := ya,4,m at Stage 4 DDA of the control structure
891
+ 5We implement the PI-DDA variant proposed in [34] and use the
892
+ same communication graph for the proportional and integral terms.
893
+
894
+ 7
895
899
+ duw
900
+ Figure 3: Example of the weighting function hw (28) and
901
+ its derivative (58) on a unit grid, with aw = 0.5, bw = 1.5
902
+ and cw = 2.
903
+ for some agents m ∈ M ⊂ N, the PI control (25)
904
+ can be omitted at the agents in M without affecting the
905
+ steady state. Nevertheless, the measurements of the buses
906
+ in k ∈ M are still included in the Stage 2 DDA. Note that
907
+ at least one participating agent PI controller is required
908
+ (see [23, Remark 8]).
909
+ C. Weighting Function
910
+ To allow for a better utilisation of the tolerance band
911
+ around vRef, we desire a weighting function that assigns a
912
+ low gain for errors within the tolerance band and a high
913
+ gain for larger errors. We therefore define the class C1
914
+ function yw,k = hw(uw,k) conforming to (3), where
915
+ h_w(u) := a_w u + b_w g_w(u) - b_w \tanh(g_w(u)), \qquad (28)
+ g_w(u) := \begin{cases} u + c_w, & u < -c_w, \\ 0, & -c_w \le u \le c_w, \\ u - c_w, & c_w < u, \end{cases}
926
+ cw < u
927
+ (29)
928
+ and where (29) describes a dead-zone parametrised by
929
+ cw. An example of (28) is depicted in Fig. 3 along with
930
+ its derivative. For a strictly increasing function as per
931
+ Objective 1, set aw > 0 and bw > −aw.
932
+ D. Equilibrium Analysis
933
+ In a first step towards analysing the closed loop, we
934
+ analyse the assumed equilibrium of the interconnected
935
+ microgrid and four-stage controller (see Assumption 1).
936
+ Specifically, we verify that the proposed control yields an
937
+ equilibrium which satisfies Objectives 1 and 2.
938
+ Proposition 8 (Controller equilibrium analysis). Con-
939
+ sider the DC microgrid comprising (15), (18), and (20)
940
+ which is connected in feedback with the four-stage con-
941
+ troller comprising (23), (26), and (28) as in Fig. 2. Let
942
+ Assumptions 1, 2 and 3 hold. Then, Objective 2 is met for
943
+ the equilibrium imposed by the control structure. Moreover,
944
+ Objective 1 is achieved exactly for ideal integrators ζc = 0
945
+ in (26). For lossy integrators with ζc > 0, the remaining
946
+ error for Objective 1 is be described by the steady-state
947
+ value of ya,2, where
948
+ ya,2 =
949
+ ζc
950
+ kIc(1 + ζckPc )ya,4.
951
+ (30)
952
+ The proof of Prop. 8 can be found in Appendix A.
953
+ Through Prop. 8 we thus confirm that the proposed con-
954
+ troller yields an equilibrium which meets the requirements,
955
+ even though the requirements are not perfectly met when
956
+ leaky agent PI controllers are used. We also note that
957
+ Prop. 8 only considers the controlled microgrid already in
958
+ equilibrium and does not consider the convergence to the
959
+ equilibrium.
960
+ Remark 9 (Compensating leaky-integral errors). As in-
961
+ dicated by (30) in Prop. 8, the leaky agent PI controllers
962
+ result in a constant steady-state error for the average
963
+ voltage regulation (Objective 1). Since a positive ya,2 cor-
964
+ responds to voltages below the desired vRef, it follows
965
+ that setting vRef above the actual desired voltage reference
966
+ will result in higher bus voltages. Changing vRef thus
967
+ allows the steady-state effects of the leaky integrators to be
968
+ compensated. Moreover, notice that ya,4 is the controller
969
+ output, i.e. the power setpoint p∗ used for the DGUs
970
+ (see Fig. 2). Thus, the error measure in (30), which is
971
+ only dependent on the controller output, can be used to
972
+ determine the offset to vRef for exact voltage regulation.
973
+ Note, however, that modifying vRef based on p∗ results in
974
+ a new loop which requires an additional stability analysis.
975
+ V. Subsystem Passivity Analysis
976
+ Having verified whether the desirable steady state is
977
+ achieved by the controller, we now set about analysing the
978
+ convergence to this steady state. With the aim of applying
979
+ Theorem 6 for the closed-loop stability, we first analyse the
980
+ passivity properties of the individual subsystems. Since
981
+ the steady-state bus voltages ˆvk are unknown and non-
982
+ zero, we investigate the passivity properties shifted to any
983
+ plausible point of operation using EIP. To this end, we
984
+ construct an EIP formulation for the DC microgrid from
985
+ its constitutive elements in Section V-A. This is followed
986
+ by the respective analyses of the various controller stages
987
+ in Section V-B. Note that we omit the bus indices k and
988
+ l in this section where clear from context.
989
+ A. DC Microgrid Passivity
990
+ For the stability of the microgrid at the equilibrium
991
+ ˆv, we desire an EIP property relating the shifted input
992
+ power setpoints ˜p∗ = p∗ − ˆp∗ to the output voltage errors
993
+ ˜v = v − ˆv of all nodes, since this port (˜p∗, ˜v) is used
994
+ by the controller in Fig. 2. To this end, we derive EIP
995
+ properties for the load, DGU and line subsystems of the
996
+ microgrid, making sure to shift the subsystem dynamics to
997
+ the assumed equilibrium in each case (see Assumption 1).
998
+ Thereafter, we combine the results of these subsystems, to
999
+ construct an EIP property for the microgrid as a whole.
1000
+ Where applicable, an analysis of the zero-state dynamics is
1001
+ performed to ensure the eventual stability of the controlled
1002
+ microgrid.
1003
+ 1) Load Passivity: Let the unactuated bus dynamics
1004
+ in (15) for the buses in Nβ be shifted to the equilibrium
1005
+ (ˆit, ˆv), yielding
1006
+ Ceq ˙˜v = −eT
1007
+ P,k˜it − ˜IL(˜v) + (eT
1008
+ P,kˆit + IL(ˆv)),
1009
+ (31)
1010
+
1011
+ 8
1012
+ for the static load function shifted according to (3). In
1013
+ (31), eT
1014
+ P,kˆit = −IL(ˆv) since the load is fully supplied by
1015
+ the cumulative line currents in steady state.
1016
+ Proposition 9 (Load EIP). The shifted load dynamics in
1017
+ (31) are OFP(ρL) w.r.t. the input-output pair (−eT
1018
+ P,k˜it, ˜v)
1019
+ with ρL = cL the smallest gradient of the static load
1020
+ function IL(v).
1021
+ Proof. Consider the storage function SL along with its
1022
+ time derivative
1023
+ SL = Ceq
1024
+ 2 ˜v2,
1025
+ (32)
1026
+ ˙SL = −˜veT
1027
+ P,k˜it − ˜v ˜IL(˜v).
1028
+ (33)
1029
+ Since the static load function IL(v) is IF-OFP according
1030
+ to Prop. 5, it is bounded from below by cL˜v2 ≤ ˜v ˜IL(˜v)
1031
+ (see (6)). Incorporate this lower bound into (33) to obtain
1032
+ ˙SL ≤ wL := −˜veT
1033
+ P,k˜it − cL˜v2
1034
+ (34)
1035
+ which yields the OFP property from Definition 2.
1036
+
1037
+ Remark 10 (ZIP load passivity). Prop. 9 and (4) demon-
1038
+ strate that the passivity properties of the unactuated buses
1039
+ are directly linked to the smallest gradient of the load
1040
+ function. For the ZIP load in (16), this yields
1041
+ cL = min
1042
+
1043
+ Z−1, Z−1 −
1044
+ P
1045
+ v2
1046
+ crit
1047
+ , Z−1
1048
+ crit
1049
+
1050
+ .
1051
+ (35)
1052
+ Considering the strictly passive case (cL = 0) along with
1053
+ I, P ≥ 0 yields the passivity condition Z−1v2
1054
+ crit ≥ P
1055
+ frequently used in the literature [10], [16], [18]–[20].
1056
+ 2) DGU Passivity: Shift the states (e, i, v) and inputs
1057
+ (p∗, it) of the DGU dynamics in (20) for the buses in Nα to
1058
+ the respective error variables (˜e,˜i, ˜v) and (˜p∗,˜it) to obtain
1059
+ (36) on the next page, where the static load function
1060
+ is incorporated into the matrix Ad. Furthermore, the
1061
+ measured power p = vi = v(˜i + ˆi) in (19) is left partially
1062
+ in unshifted variables such that Ad is also dependent on
1063
+ the unshifted voltage v and the steady-state current ˆi.
1064
+ Note that the constant τd in (36) is found by setting the
1065
+ error variables (˜p∗,˜it, ˜ed,˜i, ˜v) and their time derivatives to
1066
+ zero. As such, the constant τd ≡ 0 can be disregarded
1067
+ in the passivity analysis. We now analyse the shifted
1068
+ nonlinear system in (36) for EIP.
1069
+ Theorem 10 (EIP DGUs). The shifted DGU dynamics in
1070
+ (36) are simultaneously IF-OFP(νd,1, ρd) w.r.t. the input-
1071
+ output pair (˜p∗, ˜v) and IFP(νd,2) w.r.t. the input-output
1072
+ pair (−eT
1073
+ k ˜it, ˜v), if a feasible solution can be found for
1074
+ max
1075
+ Pd, νd,1, νd,2, ρd νd,1 + νd,2 + ρd
1076
+ s.t.
1077
+ (38) holds ∀ v ∈ V ⊆ R+, ∀ˆi ∈ ˆI ⊆ R
1078
+ (37)
1079
+ where Qd(v,ˆi, cL) := PdAd(v,ˆi, cL) + AT
1080
+ d (v,ˆi, cL)Pd,
1081
+ Ad(v,ˆi, cL) =
1082
+
1083
+
1084
+ 0
1085
+ −v
1086
+ −ˆi
1087
+ kI
1088
+ d
1089
+ ˜R − R − kP
1090
+ d v
1091
+ −1 − kP
1092
+ c ˆi
1093
+ 0
1094
+ 1
1095
+ −cL
1096
+
1097
+  ,
1098
+ (39)
1099
+ and with νd,1, νd,2, ρd ∈ R, cL as in (4) and cd = [0, 0, 1]T.
1100
+ Proof. Consider for (36) the storage function
1101
+ Sd =
1102
+
1103
+
1104
+ ˜ed
1105
+ ˜i
1106
+ ˜v
1107
+
1108
+
1109
+ T
1110
+ Pd
1111
+
1112
+
1113
+ ˜ed
1114
+ L˜i
1115
+ Ceq˜v
1116
+
1117
+  ,
1118
+ (40)
1119
+ with Pd ≻ 0. The time derivative of (40) is
1120
+ ˙Sd =
1121
+
1122
+
1123
+ ˜xd
1124
+ ˜p∗
1125
+ eT
1126
+ P,k˜it
1127
+
1128
+
1129
+ T
1130
+ 
1131
+ Qd(v,ˆi,
1132
+ ˜IL(˜v)
1133
+ ˜v
1134
+ )
1135
+ Pdbd,1
1136
+ Pdbd,2
1137
+ bT
1138
+ d,1Pd
1139
+ 0
1140
+ 0
1141
+ bT
1142
+ d,2Pd
1143
+ 0
1144
+ 0
1145
+
1146
+ 
1147
+
1148
+
1149
+ ˜xd
1150
+ ˜p∗
1151
+ eT
1152
+ P,k˜it
1153
+
1154
+ ,
1155
+ (41)
1156
+ with ˜xd as in (36). Since it follows from (6) that −˜v ˜IL(˜v) ≤
1157
+ −cL˜v2, this bound can be incorporated into the inequality
1158
+ ˙Sd ≤
1159
+
1160
+
1161
+ ˜xd
1162
+ ˜p∗
1163
+ eT
1164
+ P,k˜it
1165
+
1166
+
1167
+ T
1168
+
1169
+ Qd(v,ˆi, cL)
1170
+ Pdbd,1
1171
+ Pdbd,2
1172
+ bT
1173
+ d,1Pd
1174
+ 0
1175
+ 0
1176
+ bT
1177
+ d,2Pd
1178
+ 0
1179
+ 0
1180
+
1181
+
1182
+
1183
+
1184
+ ˜xd
1185
+ ˜p∗
1186
+ eT
1187
+ P,k˜it
1188
+
1189
+ .
1190
+ (42)
1191
+ The desired IF-OFP and IFP properties for the DGU are
1192
+ described by the supply rate
1193
+ wd = (1 + νd,1ρd)˜p∗˜v − νd,1(˜p∗)2 − ρd˜v2
1194
+ − ˜veT
1195
+ P,k˜it − νd,2
1196
+
1197
+ eT
1198
+ P,k˜it
1199
+ �2
1200
+ (43)
1201
+ These properties are guaranteed, if ˙Sd−wd < 0 for all valid
1202
+ inputs and outputs and for v ∈ V and ˆi ∈ ˆI. Combining
1203
+ (42) and (43) in this manner directly leads to constraint
1204
+ (38) in (37). Finally, the objective function in (37) seeks
1205
+ to find the largest indices for which the constraints are
1206
+ satisfied in a similar manner to Theorem 6.
1207
+
1208
+ Although Theorem 10 demonstrates the EIP of the
1209
+ actuated buses, notice that the ˜ed and ˆi of (36) are
1210
+ not included in the supply rate wd in (43). As such, an
1211
+ investigation of the zero state dynamics of the DGU is
1212
+ required.
1213
+ Proposition 11 (ZSO DGUs). The shifted DGU dynam-
1214
+ ics in (36) are ZSO.
1215
+ Proof. In (36), set the inputs ˜p∗ ≡ 0, ˜it ≡ 0 and the
1216
+ output ˜v ≡ 0. Since τd = 0 and ˜IL(0) = 0, verify from the
1217
+ equation for ˙˜v that ˜i ≡ 0. From the equation for ˙˜i, it then
1218
+ follows that ˜ed ≡ 0 which concludes this proof.
1219
+
1220
+ Remark
1221
+ 11
1222
+ (Compensating
1223
+ non-passive
1224
+ loads).
1225
+ As
1226
+ demonstrated in [11], adding a term dependent on ˙vk to
1227
+ the regulator output vs,k in (19) allows for damping to
1228
+ be added to the unactuated state vk. This in turn allows
1229
+ for regulation in the presence of non-passive loads and
1230
+ can yield more favourable passivity indices when applying
1231
+ Theorem 10.
1232
+ 3) Line Passivity: The dynamics of the line subsystem
1233
+ (18) shifted to the equilibrium (ˆit, ˆv) yield
1234
+ Lkl˙˜it = −Rkl˜it + eT
1235
+ P,kl˜v,
1236
+ (44)
1237
+ which can now be analysed for passivity.
1238
+
1239
+ 9
1240
+
1241
+
1242
+ ˙˜ed
1243
+ L˙˜i
1244
+ Ceq ˙˜v
1245
+
1246
+ =
1247
+
1248
+ 
1249
+ 0
1250
+ −v
1251
+ −ˆi
1252
+ kI
1253
+ d
1254
+ ˜R−R−kP
1255
+ d v
1256
+ −1−kP
1257
+ c ˆi
1258
+ 0
1259
+ 1
1260
+
1261
+ ˜IL(˜v)
1262
+ ˜v
1263
+
1264
+ 
1265
+
1266
+ ��
1267
+
1268
+ Ad(v,ˆi,
1269
+ ˜IL(˜v)
1270
+ ˜v
1271
+ )
1272
+
1273
+
1274
+ ˜ed
1275
+ ˜i
1276
+ ˜v
1277
+
1278
+
1279
+ ����
1280
+ ˜xd
1281
+ +
1282
+
1283
+
1284
+ 1
1285
+ kP
1286
+ d
1287
+ 0
1288
+
1289
+
1290
+ ����
1291
+ bd,1
1292
+ ˜p∗ −
1293
+
1294
+
1295
+ 0
1296
+ 0
1297
+ 1
1298
+
1299
+
1300
+ ����
1301
+ bd,2
1302
+ eT
1303
+ P,k˜it +
1304
+
1305
+
1306
+ ˆp∗ − ˆvˆi
1307
+ kI
1308
+ c ˆed + ( ˜R−R)ˆi − ˆv + vRef − kP
1309
+ c (ˆp∗ − ˆvˆi)
1310
+ ˆi − eT
1311
+ P,kˆit − IL(ˆv)
1312
+
1313
+
1314
+
1315
+ ��
1316
+
1317
+ τd
1318
+ (36)
1319
+
1320
+
1321
+ Qd(v,ˆi, cL) + ρdcdcT
1322
+ d
1323
+ Pdbd,1 − 1+νd,1ρd
1324
+ 2
1325
+ cd
1326
+ Pdbd,2 − 1
1327
+ 2cd
1328
+ bT
1329
+ d,1Pd − 1+νd,1ρd
1330
+ 2
1331
+ cT
1332
+ d
1333
+ νd,1
1334
+ 0
1335
+ bT
1336
+ d,2Pd − 1
1337
+ 2cT
1338
+ d
1339
+ 0
1340
+ νd,2
1341
+
1342
+  ≺ 0,
1343
+ Pd ≻ 0
1344
+ (38)
1345
+ Proposition 12 (OFP lines). The shifted line dynamics
+ in (44) are OFP(ρt) with ρt = Rkl w.r.t. the input-output
+ pair (eT_P,kl ˜v, ˜it) with the storage function
+ St = (Lkl/2) ˜it2. (45)
+ Proof. The proof follows trivially by verifying that
+ ˙St = ˜it eT_P,kl ˜v − Rkl ˜it2 =: wt, (46)
+ where wt is an OFP supply rate as per Definition 2.
1360
+
1361
+ 4) Interconnected Microgrid Dissipativity: Having sep-
1362
+ arately analysed the subsystems comprising the microgrid,
1363
+ we now combine the results to formulate the dissipativity
1364
+ of the full microgrid w.r.t. the input-output pair (˜p∗, ˜v).
1365
+ For simplicity, we group the buses according to their
1366
+ actuation states (13). Thus, ˜p∗ = [ ˜p∗
1367
+ α
1368
+ T , ˜p∗
1369
+ β
1370
+ T ]T and ˜v =
1371
+ [˜vT
1372
+ α , ˜vT
1373
+ β ]T have the same dimensions. Note that we include
1374
+ the inputs ˜p∗
1375
+ β for the unactuated buses in Nβ as provided
1376
+ by the four-stage controller (see Fig. 2), even though these
1377
+ inputs are not used.
1378
+ Proposition 13 (Microgrid dissipativity). A DC mi-
1379
+ crogrid comprising DGUs (20), lines (18) and loads (15)
1380
+ with an interconnection topology described by a connected
1381
+ graph GP is dissipative w.r.t. the supply rate
1382
+ wM,αβ = (1 + νd,1ρd)˜p∗
1383
+ α
1384
+ T ˜vα − νd,1 ˜p∗
1385
+ α
1386
+ T ˜p∗
1387
+ α
1388
+ − ρd˜vT
1389
+ α ˜vα − ρL˜vT
1390
+ β ˜vβ,
1391
+ (47)
1392
+ if νd,2 + ρt ≥ 0 for the worst-case indices of the buses
1393
+ and lines calculated in Prop. 9 (ρL), Prop. 12 (ρt), and
1394
+ Theorem 10 (νd,1, νd,2, ρd), i.e.
1395
+ νd,1 = min
1396
+ k∈Nα νd,1,k, νd,2 = min
1397
+ k∈Nα νd,2,k, ρd = min
1398
+ k∈Nα ρd,k,
1399
+ ρL = min
1400
+ k∈Nβ ρL,k,
1401
+ ρt = min
1402
+ kl∈EP ρt,k.
1403
+ (48)
1404
+ Proof. Define for the interconnected microgrid the storage
1405
+ function
1406
+ SM =
1407
+
1408
+ k∈Nα
1409
+ Sd,k +
1410
+
1411
+ k∈Nβ
1412
+ SL,k +
1413
+
1414
+ kl∈EP
1415
+ St,kl.
1416
+ (49)
1417
+ An upper bound for time derivative of (49) may then be
1418
+ found by combining the supply rates in (34), (43) and (46)
1419
+ ˙SM ≤ (1 + νd,1ρd)˜p∗
1420
+ α
1421
+ T ˜vα − νd,1 ˜p∗
1422
+ α
1423
+ T ˜p∗
1424
+ α − ρd˜vT
1425
+ α ˜vα
1426
+ + ˜iT
1427
+ t ET ˜v − ˜vT
1428
+ α Eα˜it − ˜vT
1429
+ β Eβ˜it
1430
+ − ρL˜vT
1431
+ β ˜vβ − (νd,2 + ρt)˜iT
1432
+ t ˜it.
1433
+ (50)
1434
+ The skew-symmetric interconnection of the nodes and lines
1435
+ results in ˜iT
1436
+ t ET ˜v = ˜vT
1437
+ α Eα˜it + ˜vT
1438
+ β Eβ˜it. Furthermore with
1439
+ νd,2+ρt ≥ 0, we can drop the unnecessary strictly negative
1440
+ ˜iT
1441
+ t ˜it term and verify that ˙SM ≤ wM,αβ.
1442
+
1443
+ Through Prop. 13, the dissipativity of the entire mi-
1444
+ crogrid is formulated using the desired input and output
1445
+ vectors. However, the supply rate in (47) is dependent on
1446
+ the actuation states of the buses. We now remove this
1447
+ dependence by finding a supply rate for a specific bus that
1448
+ encompasses both its actuated and unactuated state. By
1449
+ considering a quadratic supply rate as a sector condition
1450
+ (see [26], [29]), a combined supply rate is found through
1451
+ the union of the sectors for the actuated and unactuated
1452
+ cases.
1453
+ Theorem 14 (Actuation independent passivity). A DC
1454
+ microgrid for which Prop. 13 holds is IF-OFP(νd,1, ρd)
1455
+ w.r.t. the supply rate
1456
+ wM = (1 + νd,1ρd) ˜p∗T ˜v − νd,1 ˜p∗T ˜p∗ − ρd ˜vT ˜v (51)
+ if, for an arbitrarily small νL > 0,
+ 0 ≤ νd,2 + ρt, (52)
+ 0 < ρL < 1, (53)
+ 0 > νd,1. (54)
1465
+ The proof of Theorem 14 can be found in Appendix A.
1466
+ Through (51), we thus show that a single IF-OFP supply
1467
+ rate describes the input-output passivity of the entire
1468
+ microgrid, irrespective of the states of actuation of the
1469
+ buses. This supply rate is derived from the properties of
1470
+ the DGUs in Theorem 10 and accounts for the worst-case
1471
+ loads.
1472
+ Remark 12 (Non-passive loads at DGUs). While (53)
1473
+ in Theorem 14 requires strictly passive loads at unactuated
1474
+ buses, this is not required for the loads at actuated buses.
1475
+
1476
+ 10
1477
+ Indeed, the loads at DGUs may exhibit a lack of passivity
1478
+ with cL < 0. However, this would be reflected by the indices
1479
+ obtained in Theorem 10 and the supply rate in (51).
1480
+ Remark 13 (Non-static loads). Due to the use of passivity
1481
+ in this section, the analysis presented here effortlessly
1482
+ extends to the case of dynamic loads. Such dynamic loads
1483
+ simply need to exhibit equivalent IFP properties (see e.g.
1484
+ Prop. 9) and must be ZSO.
1485
+ Remark 14 (Passivity-based controllers). In addition
1486
+ to the four-stage controller proposed in this work, the
1487
+ passivity formulation of the DC microgrid in Theorem 14
1488
+ can be used alongside any other controller which provides
1489
+ suitable passivity indices. This includes methods such as
1490
+ interconnection and damping assignment passivity-based
1491
+ control [24, p. 190] or passivity-based model predictive
1492
+ control (see e.g. [36]).
1493
+ B. Controller Passivity
1494
+ Having analysed the passivity of the microgrid subsys-
1495
+ tems and their interconnection, we now investigate the
1496
+ passivity properties of the control structure in Section IV.
1497
+ This is done successively for each part of the controller:
1498
+ the DDA stages, the PI stage and the weighting function.
1499
+ 1) DDA Passivity: Consider the DDA stages in Fig. 2.
1500
+ Proposition 15 (DDA Passivity). The DDA controller
1501
+ in (23) with the storage function
1502
+ Sa,s = (1/(2γa)) (xT_a,s xa,s + zT_a,s za,s) (55)
+ is OFP(ρa), ρa = 1, w.r.t. (ua,s, ya,s) and is ZSO.
+ Proof. The time derivative of (55) is
+ ˙Sa,s = −xT_a,s xa,s − (1/γa) xT_a,s LC,P xa,s + xT_a,s ua,s
+       ≤ wa,s := xT_a,s ua,s − xT_a,s xa,s (56)
1523
+ since LC,P > 0 and γa > 0, thus verifying the OFP
1524
+ property for ya,s = xa,s. Furthermore, the DDA controller
1525
+ is ZSO since the system dynamics in (23) is Hurwitz [34,
1526
+ Theorem 5].
1527
+
1528
+ The OFP result in Prop. 15 also means that (23) has an
1529
+ L2-gain of 1 [28, p. 3]. Note that since the DDA in (23) is
1530
+ linear, the properties in Prop. 15 also hold for the shifted
1531
+ input-output combination (˜ua,s, ˜ya,s) [28, p. 26].
1532
+ 2) PI Passivity: The ideal PI controller in (26) with
1533
+ ζc = 0 can trivially be shown to be IFP(kP_c) for the storage
+ function Sc = kI_c xT_c xc/2. The leaky PI control with ζc > 0
1536
+ exhibits the following properties.
1537
+ Proposition 16 (Leaky PI Passivity). The leaky PI
+ control in (26) with the storage function Sc = kI_c xT_c xc/2
+ is dissipative w.r.t. the supply rate
+ wc = ((1 + 2ζc kP_c)/kI_c) uT_c yc − ((kP_c + ζc (kP_c)2)/kI_c) uT_c uc − (ζc/kI_c) yT_c yc
+    =: 2σc uT_c yc − νc uT_c uc − ρc yT_c yc. (57)
1571
+ Proof. Calculate the time derivative of Sc as
+ ˙Sc = kI_c xT_c uc − ζc kI_c xT_c xc. Substitute in kI_c xc = yc − kP_c uc from
+ the output in (26) and simplify to verify that ˙Sc = wc.
1582
+
1583
+ Note that while wc in (57) has a quadratic form, it does
1584
+ not directly match the IF-OFP form in Definition 2. How-
1585
+ ever, by appropriately weighing the storage function Sc,
1586
+ the form in Definition 2 is easily obtained. For simplicity
1587
+ and without invalidating the results in the sequel, we omit
1588
+ this step here. Furthermore, we note that the linearity of
1589
+ (26) ensures that the properties in Prop. 16 also hold for
1590
+ the shifted input-output combination (˜uc, ˜yc) [28, p. 26].
1591
+ 3) Weighting Function Passivity: The derivative of the
1592
+ weighting function in (28) is described by (see e.g. Fig. 3)
1593
+ dyw/duw = aw + bw tanh2(gw(uw)). (58)
+ By setting bw > −aw and applying Prop. 5, (28) is found
+ to be IF-OFP(νw, ρw) with
+ νw = aw,  ρw = 1/(aw + bw). (59)
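+ As a quick sanity check of (59), the weighting-function indices follow directly from aw and bw.
+ The short sketch below is only illustrative; the numerical values are those that later appear in
+ Table II.
+ def weighting_indices(a_w, b_w):
+     # IF-OFP indices of the weighting function per (59); requires b_w > -a_w.
+     return a_w, 1.0 / (a_w + b_w)
+ print(weighting_indices(0.1, 1.1))  # (0.1, 0.833...), using the Table II values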
1605
+ VI. Interconnected Stability
1606
+ Using the passivity properties of the microgrid and
1607
+ controller subsystems obtained in Section V, we now in-
1608
+ vestigate the stability of the microgrid and controller
1609
+ interconnected as in Fig. 2. However, we note that the
1610
+ agent PI controller and the Stage 4 DDA controller ex-
1611
+ hibit a cascaded IFP-OFP obstacle (see Prop. 7) if the
1612
+ PI controller is ideal (ζc = 0) which prevents a closed-
1613
+ loop analysis with dissipativity. Thus, in Section VI-A, we
1614
+ derive stability conditions using leaky agent PI controllers
1615
+ with ζc > 0.
1616
+ A. Leaky PI-Controlled Stability
1617
+ Consider the case where the passivity properties of all
1618
+ subsystems in Fig. 2 except for the weighting function
1619
+ (28) are fixed. Combining the results in Section V with
1620
+ Theorem 6, we now determine the weighting function
1621
+ parameters which guarantee closed-loop stability.
1622
+ Theorem
1623
+ 17
1624
+ (Designed
1625
+ closed-loop
1626
+ stability).
1627
+ The
1628
+ closed-loop in Fig. 2 is guaranteed to be asymptotically
1629
+ stable for the weighting function parameters aw = νw,
1630
+ bw = 1/ρw − aw if a feasible solution is found for
1631
+ min
1632
+ νw, ρw, di,
1633
+ νw + ρw
1634
+ s.t.
1635
+ Q ≺ 0,
1636
+ di > 0,
1637
+ i = 1, . . . , 5,
1638
+ (60)
1639
+ where σw = 1/2(1 + νwρw), σd = 1/2(1 + νd,1ρd), and
1640
+ Q=
1641
+
1642
+ 
1643
+ −ρwd1
1644
+ d2
1645
+ 2
1646
+ 0
1647
+ 0
1648
+ −σwd1
1649
+ d2
1650
+ 2
1651
+ −ρad2−νcd3 σcd3
1652
+ 0
1653
+ 0
1654
+ 0
1655
+ σcd3
1656
+ −ρcd3
1657
+ kP
1658
+ c d4
1659
+ 2
1660
+ 0
1661
+ 0
1662
+ 0
1663
+ kP
1664
+ c d4
1665
+ 2
1666
+ −ρad4−νd,1d5
1667
+ σdd5
1668
+ −σwd1
1669
+ 0
1670
+ 0
1671
+ σdd5
1672
+ −ρdd5−νwd1
1673
+
1674
+ 
1675
+ (61)
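+ The matrix in (61) is easy to evaluate numerically once candidate indices and multipliers di are
+ available. The following illustrative sketch (not from the paper) builds Q and tests its negative
+ definiteness via its eigenvalues; searching over (νw, ρw, di) is left to the user, and all function
+ names are placeholders.
+ import numpy as np
+ def build_Q(d, nu_w, rho_w, nu_c, rho_c, sigma_c, nu_d1, rho_d, rho_a, kP_c):
+     # Evaluate the matrix Q from (61) for given passivity indices and
+     # multipliers d = (d1, ..., d5); all arguments are plain floats.
+     d1, d2, d3, d4, d5 = d
+     sigma_w = 0.5 * (1 + nu_w * rho_w)
+     sigma_d = 0.5 * (1 + nu_d1 * rho_d)
+     return np.array([
+         [-rho_w * d1, d2 / 2, 0.0, 0.0, -sigma_w * d1],
+         [d2 / 2, -rho_a * d2 - nu_c * d3, sigma_c * d3, 0.0, 0.0],
+         [0.0, sigma_c * d3, -rho_c * d3, kP_c * d4 / 2, 0.0],
+         [0.0, 0.0, kP_c * d4 / 2, -rho_a * d4 - nu_d1 * d5, sigma_d * d5],
+         [-sigma_w * d1, 0.0, 0.0, sigma_d * d5, -rho_d * d5 - nu_w * d1]])
+ def certifies_stability(Q):
+     # Theorem 17 asks for Q to be negative definite.
+     return bool(np.all(np.linalg.eigvalsh(Q) < 0))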
1676
+ Proof. Use the supply rates for the DC microgrid in (51),
+ the two DDA controllers in (56), the agent PI controller
+ in (57), and the IF-OFP supply rate for the weighting
+ function (59) to construct W in (10). Let the output of
+ the PI controller be normalised according to
+ yc = kI_c xc + kP_c uc = kP_c (κI_c xc + uc) = kP_c yκ_c. (62)
1691
+ Furthermore, the five subsystems in Fig. 2 are intercon-
1692
+ nected by u = Hy, where
1693
+ H = [ 0, 0, 0, 0, −1 ;
+       1, 0, 0, 0, 0 ;
+       0, 1, 0, 0, 0 ;
+       0, 0, kP_c, 0, 0 ;
+       0, 0, 0, 1, 0 ]. (63)
1726
+ Apply Theorem 6, with D as in (9) and simplify Q in (8)
1727
+ to obtain (61). This yields the optimisation problem (60),
1728
+ where the indices of the weighting function (νw, ρw) are
1729
+ configurable. Asymptotic stability is ensured by changing
1730
+ the matrix inequality in (7) to a strict inequality and by
1731
+ ensuring that any states not present in y are asymptot-
1732
+ ically stable. The latter condition is ensured through the
1733
+ zero-state analyses in Prop. 11 and Prop. 15 and through
1734
+ the condition in Prop. 13. Finally, the parameters aw and
1735
+ bw are calculated from (59).
1736
+
1737
+ Through the application of Theorem 17, the parameters
1738
+ for the weighting function can thus be designed to ensure
1739
+ stability. We highlight that the results in Section V and
1740
+ Theorem 17 hold irrespective of the physical or commu-
1741
+ nication topologies and are independent of the actuation
1742
+ states of the nodes, as long as Assumptions 2 and 3
1743
+ hold. Therefore, verifying Theorem 17 ensures robust-
1744
+ ness against any changes which do not alter the worst-
1745
+ case passivity indices of the respective subsystems (see
1746
+ (48)). Note that the presented stability analysis requires
1747
+ strictly passive loads and leaky agent PI controllers (see
1748
+ Remark 6). As demonstrated via simulation, these require-
1749
+ ments are sufficient for stability, but not necessary.
1750
+ VII. Simulation
1751
+ In this section, we demonstrate the coordination and
1752
+ robustness of the proposed control structure by means of
1753
+ a Matlab/Simulink simulation using Simscape com-
1754
+ ponents. We consider the network comprising 10 buses
1755
+ depicted in Fig. 4. In Section VII-A, we describe the setup
1756
+ of the simulation along with the various changes that the
1757
+ network is subjected to. Next, in Section VII-B, simula-
1758
+ tion results are presented for the case where Theorem 17
1759
+ holds, i.e. with strictly passive loads and leaky agent PI
1760
+ controllers. Finally, in Section VII-C, we show the robust
1761
+ stability of the proposed control structure for passive loads
1762
+ and ideal agent PI controllers.
1763
+ A. Simulation Setup
1764
+ The DC microgrid in Fig. 4 is simulated with the
1765
+ parameters in Table I. The ZIP load parameters are
1766
+ chosen randomly in the specified ranges such that the
1767
+ required passivity measures are fulfilled (see Remark 10).
1768
+ Table I: Simulation Parameter Values
+ Voltages: vRef = 380 V, vcrit = 266 V
+ DGU Filters (14): Rk = 0.2 Ω, Lk = 1.8 mH, Ck = 2.2 mF
+ ZIP Loads (16): |Z−1| ≤ 0.1/Ω, |I| ≤ 21 A, |P| ≤ 3 kW
+ Elec. Lines (18): Rkl = 0.1 Ω/km, Lkl = 2 µH/km, Ckl = 22 nF/km, length ∈ [0.2; 10] km
1785
+ Table II: Controller Parameter Values
+ Power PI Control (19): kP_d = 90, kI_d = 90, ˜R = −8
+ DDA Control (23): kP_a = 50, kI_a = 100, γa = 16
+ Agent PI Control (26): kP_c = 160, kI_c = 600, ζc = 0.08
+ Weighting Function (28): aw = 0.1, bw = 1.1, cw = 7.5 V
1808
+ Furthermore, typical values are used for the DGUs and
1809
+ the lines [4], [9], [13]. The lines exhibit the same per
1810
+ kilometer parameter values and the line lengths are chosen
1811
+ randomly in the given interval. The line lengths are given
1812
+ in Appendix B.
1813
+ The simulation starts off in State A (see Fig. 4) with
1814
+ Bus 9 connected and with all states at zero. The following
1815
+ changes are made at the indicated times.
1816
+ • t = 5 s: The actuation states αi of the buses switch
1817
+ from State A to State B and Bus 9 is disconnected.
1818
+ • t = 10 s: The communication topology switches from
1819
+ State A to State B and Bus 10 is connected.
1820
+ • t = 15 s: The electrical topology switches from State A
1821
+ to State B.
1822
+ • t = 20 s: The bus actuation status along with the com-
1823
+ munication and electrical topologies revert to State A.
1824
+ Bus 9 is connected and Bus 10 is disconnected.
1825
+ Furthermore, at each change, half of the buses are ran-
1826
+ domly selected and assigned new ZIP load parameters.
1827
+ The ZIP load parameters can be found in Appendix B.
1828
+ The parameters for the closed-loop controller, as spe-
1829
+ cified in Table II, are designed constructively, starting
1830
+ from the microgrid subsystems. First, the passivity indices
1831
+ for the lines (ρt = 0.01) and loads (ρL = cL = 0.05) are cal-
1832
+ culated from Prop. 12 and Prop. 9, respectively. Next, the
1833
+ parameters for the power regulator (19) are chosen and the
1834
+ DGU passivity indices are calculated from Theorem 10,
1835
+ with the optimisation verified for the practically relevant
1836
+ intervals v ∈ [200 V, 550 V] and ˆi ∈ [10 A, 350 A]. Note that
1837
+ adding the restriction νd,2 ≥ −ρt to the optimisation in
1838
+ Theorem 10 ensures that (52) will be met. This yields
1839
+ a solution νd,1 = −4.686, νd,2 = −0.01 and ρd = 0.01,
1840
+ from which the microgrid supply rate is constructed as
1841
+ per Theorem 14. Finally, parameters for the agent PI con-
1842
+ trollers are chosen and the weighting function parameters
1843
+ are designed using Theorem 17. Note that Theorem 14
1844
+ requires strictly passive loads (cL > 0) and Theorem 17
1845
+ necessitates leaky integrators (ζc > 0).
1846
+
1847
+ Figure 4: Two different states for a 10-bus DC microgrid along with electrical and communication connections. The
+ loads at the buses are omitted for clarity. (Legend: bus; active DGU (d); electrical line; communication line.)
1891
+ Figure 5: Simulated bus voltages with line colours as per the legend in Fig. 4.
1904
+ Figure 6: Simulated weighted voltage errors and the average error of connected agents with agent line colours as per the legend in Fig. 4.
1916
+ B. Results
1917
+ The bus voltages vk shown in Fig. 5 confirm the stability
1918
+ of the closed loop results, although the voltages tend to
1919
+ be lower than desired, due to the use of leaky integrators.
1920
+ The remaining steady-state offset can also be seen in the
1921
+ weighted errors plotted in Fig. 6, where the average tends
1922
+ towards a non-zero value in each instance (see Remark 6).
1923
+ Despite this, the four stage controller reaches a consensus
1924
+ on the average of the nonlinear weighted voltage errors.
1925
+ Moreover, the advantage of the weighting function can
1926
+ be seen at Bus 6 in t ∈ [20 s, 25 s), where a significant
1927
+ weighted error only appears in Fig. 6 when the voltage in Fig. 5 is not close to vRef.
+ Figure 7: Simulated outputs of the local agent controllers with line colours as per the legend in Fig. 4.
+ Figure 8: Simulated power setpoints with line colours as per the legend in Fig. 4.
+ Note that the voltages of Buses 9
1955
+ and 10 are at 0 V during the respective periods where they
1956
+ are disconnected and not actuated.
1957
+ In Fig. 7, the outputs of the agent controllers show that
1958
+ no synchronisation of the agent controllers is required.
1959
+ The agent controller outputs at Buses 1 to 8, which are
1960
+ continuously connected to the communication network,
1961
+ are near identical. However, the disconnecting buses, e.g.
1962
+ Bus 9 after t = 5 s, rapidly diverge from other controllers
1963
+ and do not synchronise on reconnect. Despite this, the
1964
+ final stage of the controller ensures cooperation of the
1965
+ buses, as demonstrated in the power setpoints p∗_k in Fig. 8.
+ When Bus 10 connects at t = 10 s, its setpoint p∗_k rapidly
+ converges to the coordinated common setpoint used by all
+ connected agents.
+ Figure 9: Simulated bus voltages with ideal PI controllers and with line colours as per the legend in Fig. 4.
+ Figure 10: Simulated weighted voltage errors and the average error of connected agents with ideal PI controllers and with agent line colours as per the legend in Fig. 4.
1998
+ Although the leaky integrators yield imperfect results
1999
+ (see Remark 6 and Fig. 6), this can be mitigated by
2000
+ choosing a higher vRef. Indeed, by combining the steady
2001
+ state of the agent PI controller (27) with the DDA steady
2002
+ state (24), we see that injecting power into the system
2003
+ p∗ > 0 results in positive voltage errors. Since we consider
2004
+ (strictly) passive loads, increasing vRef is thus a viable
2005
+ method for correcting the imperfect results whilst retain-
2006
+ ing the advantageous properties of the stability analysis in
2007
+ Theorem 17.
2008
+ C. Robustness Test
2009
+ We now repeat the simulation described in
+ Section VII-A with the following changes. 1) Passive
+ loads with cL = 0 are allowed at all buses, and 2)
2019
+ ideal agent PI controllers with ζc = 0 are used. Under
2020
+ these conditions, Theorem 17 can no longer be used to
2021
+ verify the stability. However, the stability may still be
2022
+ verified using classical approaches such as evaluating
2023
+ the eigenvalues for the closed loop linearised about the
2024
+ equilibrium. Note that the same random seed is used as
2025
+ for the results in Section VII-A, allowing for a comparison
2026
+ between the scenarios to be made.
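+ The classical check mentioned above amounts to inspecting the spectrum of the closed-loop
+ Jacobian at the equilibrium. A minimal illustrative sketch (assuming the Jacobian A_cl has
+ already been obtained, e.g. from a numerical linearisation of the simulation model) could look
+ as follows; it is not code from the paper.
+ import numpy as np
+ def locally_stable(A_cl):
+     # A_cl: Jacobian of the closed loop, linearised about the equilibrium.
+     # Returns True if all eigenvalues have negative real part.
+     return bool(np.all(np.linalg.eigvals(A_cl).real < 0))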
2027
+ Fig. 9 demonstrates the improved consensus achieved
2028
+ by the ideal PI agents, in that the bus voltages are
2029
+ closer to vRef at steady state than in Fig. 5.
+ Figure 11: Simulated outputs of the local agent controllers with ideal PI controllers and with line colours as per the legend in Fig. 4.
+ Figure 12: Simulated power setpoints with ideal PI controllers and with line colours as per the legend in Fig. 4.
+ Moreover,
2055
+ Fig. 10 shows that perfect consensus is achieved, where
2056
+ the average error tends to zero in each case. This figure
2057
+ also demonstrates the robustness against communication
2058
+ interruptions, as is the case for Bus 10 which, for the
2059
+ period t ∈ [5 s, 10 s), is actuated but does not communicate
2060
+ with the other buses. Despite this, it is able to accurately
2061
+ regulate its own bus voltage (compared to the imperfect
2062
+ regulation achieved with leaky integrators as in Fig. 5).
2063
+ The lack of leaky integrators is also evident in Fig. 11,
2064
+ where the output of the agent controllers stay constant
2065
+ when a bus is disconnected and not actuated. Lastly, the
2066
+ power setpoints in Fig. 12 converging to a common value
2067
+ for the communicating agents confirm the coordination of
2068
+ the agents.
2069
+ Note that while tests with non-passive loads can also
2070
+ yield a stable closed loop, instability can occur when
2071
+ the non-passive loads dominate. To address this, a tar-
2072
+ geted compensation of non-passive loads is required (see
2073
+ Remark 11).
2074
+ VIII. Conclusion
2075
+ In this paper, we proposed a four-stage distributed
2076
+ control structure that achieves power sharing in a DC mi-
2077
+ crogrid while ensuring voltage regulation for the voltages
2078
+ of both actuated and unactuated buses. We demonstrated
2079
+ how the passivity properties of various subsystems can be
2080
+ determined and combined these in a stability analysis that
2083
+ is independent of topological changes, actuation changes,
2084
+ bus connections or disconnections and load changes.
2085
+ Future work includes the consideration of non-passive
2086
+ loads at arbitrary locations in the microgrid and the
2087
+ construction of an interface to allow for the presented work
2088
+ to be combined with tertiary optimal controllers.
2089
+ Appendix A
2090
+ Proofs
2091
+ Proof of Prop. 8. For the control structure in steady state,
+ ˙xc = 0 and thus yc is constant. The steady-state output
+ (24) of the Stage 4 DDA therefore ensures Objective 2 is
+ achieved. Furthermore, consider the steady state of the
+ Stage 2 DDA
+ ua,s,k = lim_{t→∞} hw(vRef − vk), (64)
+ lim_{t→∞} ya,2,k = uT_a,s 1N / N = lim_{t→∞} (1/N) Σ_{k∈N} (vRef − h(vk)), (65)
+ if vk is in equilibrium and where h is obtained by shifting
+ hw by vRef. Note that (65) corresponds to the condition of
+ (21) in Objective 1. Therefore, ya,2 specifies the regulation
+ error of the average weighted voltage error in steady
+ state. From the steady state of the agent PI controller
+ in (26), we have ζcxc = ya,2. Thus, ideal integrators with
+ ζc = 0 ensure that Objective 1 is met exactly. For ζc > 0,
+ substitute the PI equilibrium into the output of the agent
+ PI controller in (26) to obtain the steady state equation
+ xc = (1/kI_c) (yc + kP_c ya,2). (66)
+ Substitute ζcxc = ya,2 into (24) and simplify to find
+ ya,2 = ζc / (kI_c (1 + ζc kP_c)) yc, (67)
+ for the steady state. Since the entries of the vector ya,2, and
+ thus of xc and yc, are the same at steady state, the steady-
+ state output of the Stage 4 DDA in (24) gives yc = ya,4,
+ which we combine with (67) to obtain the error
+ for Objective 1 in (30).
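+ As a rough numerical illustration of (67) (not taken from the paper), the residual error scales
+ with ζc and shrinks as kI_c grows; the values below are the controller gains from Table II
+ together with an assumed steady-state controller output yc.
+ def objective1_error(y_c, zeta_c, kI_c, kP_c):
+     # Residual average weighted voltage error per (67).
+     return zeta_c / (kI_c * (1 + zeta_c * kP_c)) * y_c
+ # Table II gains (kP_c = 160, kI_c = 600, zeta_c = 0.08) and an assumed
+ # steady-state controller output y_c = 1e4:
+ print(objective1_error(1e4, 0.08, 600, 160))  # roughly 0.1, a small non-zero offset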
2137
+
2138
+ Proof of Theorem 14. Consider the supply rates which de-
+ scribe the actuated and unactuated states, respectively, for
+ a given bus k ∈ N
+ wM,α,k = (1 + νd,1ρd) ˜p∗_α,k ˜vα,k − νd,1 (˜p∗_α,k)2 − ρd ˜v2_α,k, (68)
+ wM,β,k = −ρL ˜v2_β,k. (69)
+ These allow the microgrid supply rate in (47) to be
+ decomposed according to the actuation states αk
+ wM,αβ = Σ_{k∈Nα} wM,α,k + Σ_{k∈Nβ} wM,β,k = Σ_{k∈N} (αk wM,α,k + (1 − αk) wM,β,k). (70)
+ Figure 13: Comparison of the microgrid supply rate sectors in the proof of Theorem 14 if ρd < 0.
+ Enlarge the supply rate of the unactuated buses in (69) by
+ adding the positive term νL (˜p∗_β,k)2 for an arbitrarily small
+ νL > 0 such that
+ wM,β,k ≤ w̄M,β,k = νL (˜p∗_β,k)2 − ρL ˜v2_β,k ≤ w̄M,β,k/ρL = (νL/ρL) (˜p∗_β,k)2 − ˜v2_β,k (71)
+ for ρL as in (53). The supply rate w̄M,β,k/ρL is equivalent
+ to the L2 supply rate in Definition 2 and is thus bounded
+ by the sector [−√(νL/ρL), √(νL/ρL)] [29, Lemma 4]. Consider now
+ the supply rate of the actuated agents (68) narrowed down
+ to an IFP sector for the case that ρd < 0, i.e.
+ wM,α,k ≥ w̲M,α,k = { wM,α,k, if ρd ≥ 0;  ˜p∗_α,k ˜vα,k − νd,1 (˜p∗_α,k)2, if ρd < 0, (72)
+ such that w̲M,α,k is sector bounded by [νd,1, 1/ρd] if ρd > 0
+ and [νd,1, ∞) if ρd < 0 or if ρd = 0 (see [26, p. 231]). A re-
+ lation between w̲M,α and w̄M,β/ρL can now be established
+ by comparing their respective sector bounds:
+ w̄M,β,k/ρL ≤ w̲M,α,k if { [−√(νL/ρL), √(νL/ρL)] ⊆ [νd,1, 1/ρd], if ρd > 0;  [−√(νL/ρL), √(νL/ρL)] ⊆ [νd,1, ∞), if ρd ≤ 0. (73)
+ Since νL can be arbitrarily small, we derive (54) by
+ comparing the lower bounds in (73) and note that the
+ upper bound relation can be met for any ρd. A visual
+ comparison of the sector conditions is made in Fig. 13.
+ The combination of (71)–(73) results in
+ wM,β,k ≤ w̄M,β,k ≤ w̄M,β,k/ρL ≤ w̲M,α,k ≤ wM,α,k. (74)
+ Therefore, for the microgrid with the storage function SM
+ that is dissipative w.r.t. (47), it holds that
+ ˙SM ≤ wM,αβ ≤ Σ_{k∈N} wM,α,k = wM, (75)
+ which is found by combining (70) with (74).
2248
+
2249
+ Appendix B
2250
+ Simulation Data
2251
+ The simulation parameters used for the lines in
+ Section VII are given in Table III. Furthermore, the
2262
+ Table III: Rounded Line Lengths
+ Line 1 – 2: 1.19 km   Line 1 – 4: 7.74 km   Line 2 – 3: 2.23 km
+ Line 2 – 4: 7.20 km   Line 3 – 5: 3.14 km   Line 3 – 8: 2.82 km
+ Line 4 – 5: 3.72 km   Line 4 – 6: 6.75 km   Line 4 – 7: 1.16 km
+ Line 6 – 7: 4.44 km   Line 6 – 9: 3.11 km   Line 7 – 8: 3.69 km
+ Line 8 – 10: 1.21 km
2295
+ Table IV: Strictly Passive Load Values
+ Bus / Parameter: values at t = 0 s, t = 5 s, t = 10 s, t = 15 s, t = 20 s
+ Bus 1:  Z−1 (1/Ω): 0.103, 0.103, 0.106, 0.106, 0.083;  I (A): 4.66, 2.15, -6.08, -6.08, 14.45;  P (W): 3599, -4055, 4133, 4133, -4927
+ Bus 2:  Z−1 (1/Ω): 0.099, 0.099, 0.096, 0.096, 0.080;  I (A): -16.09, -16.09, 19.68, 19.68, 2.49;  P (W): 3204, 3204, 2659, 2659, 1346
+ Bus 3:  Z−1 (1/Ω): 0.128, 0.105, 0.105, 0.105, 0.096;  I (A): 10.27, -0.09, -0.09, -0.09, -11.09;  P (W): -1479, -3659, -3659, -3659, 3031
+ Bus 4:  Z−1 (1/Ω): 0.079, 0.079, 0.079, 0.079, 0.079;  I (A): 10.15, 10.15, 10.15, 10.15, 10.15;  P (W): -2711, -2711, -2711, -2711, -2711
+ Bus 5:  Z−1 (1/Ω): 0.095, 0.095, 0.095, 0.064, 0.107;  I (A): -6.64, -6.64, -6.64, 16.68, 2.10;  P (W): 2768, 2768, 2768, -3798, 4242
+ Bus 6:  Z−1 (1/Ω): 0.089, 0.089, 0.106, 0.103, 0.103;  I (A): 6.87, 6.87, 7.85, -5.17, -5.17;  P (W): 948, 948, 4321, 370, 370
+ Bus 7:  Z−1 (1/Ω): 0.065, 0.092, 0.092, 0.118, 0.118;  I (A): 11.96, 6.51, 6.51, 2.77, 2.77;  P (W): -3624, -3442, -3442, -3890, -3890
+ Bus 8:  Z−1 (1/Ω): 0.102, 0.102, 0.086, 0.086, 0.124;  I (A): -16.85, -16.85, 20.71, 20.71, -4.68;  P (W): 3529, 3529, -4773, -4773, -3832
+ Bus 9:  Z−1 (1/Ω): 0.111, 0.103, 0.109, 0.077, 0.077;  I (A): 13.79, -19.74, 9.53, 1.26, 1.26;  P (W): -2645, 1830, 4215, 1549, 1549
+ Bus 10: Z−1 (1/Ω): 0.072, 0.100, 0.100, 0.111, 0.111;  I (A): 7.77, 9.02, 9.02, 10.98, 10.98;  P (W): -3538, -4143, -4143, -2795, -2795
2492
+ strictly passive load parameters for the simulation results
2493
+ in Section VII-B and the passive load parameters for
2494
+ the results in Section VII-C are given in Table IV and
2495
+ Table V, respectively. Note that the P parameters for the
+ loads in Table V are the same as listed in Table IV.
2497
+ References
2498
+ [1] B. Lasseter, “Microgrids [distributed power generation],” in
2499
+ Proc. 2001 IEEE Power Engineering Society Winter Meeting,
2500
+ vol. 1, 2001, pp. 146–149.
2501
+ [2] J. J. Justo, F. Mwasilu, J. Lee, and J.-W. Jung, “AC-microgrids
2502
+ versus DC-microgrids with distributed energy resources: A re-
2503
+ view,” Renewable and Sustainable Energy Reviews, vol. 24, pp.
2504
+ 387–405, 2013.
2505
+ [3] L. Meng, Q. Shafiee, G. F. Trecate, H. Karimi, D. Fulwani,
2506
+ X. Lu, and J. M. Guerrero, “Review on control of DC microgrids
2507
+ and multiple microgrid clusters,” IEEE J. of Emerging and
2508
+ Selected Topics in Power Electron., vol. 5, no. 3, pp. 928–948,
2509
+ 2017.
2510
+ Table V: Passive Load Values, P as in Table IV
+ Bus / Parameter: values at t = 0 s, t = 5 s, t = 10 s, t = 15 s, t = 20 s
+ Bus 1:  Z−1 (1/Ω): 0.091, 0.093, 0.087, 0.087, 0.063;  I (A): 4.66, -8.15, -6.08, -6.08, 9.71
+ Bus 2:  Z−1 (1/Ω): 0.069, 0.069, 0.071, 0.071, 0.046;  I (A): -16.09, -16.09, 19.68, 19.68, 0.20
+ Bus 3:  Z−1 (1/Ω): 0.095, 0.082, 0.082, 0.082, 0.059;  I (A): 8.91, -7.12, -7.12, -7.12, -11.09
+ Bus 4:  Z−1 (1/Ω): 0.038, 0.038, 0.038, 0.038, 0.038;  I (A): 8.82, 8.82, 8.82, 8.82, 8.82
+ Bus 5:  Z−1 (1/Ω): 0.065, 0.065, 0.065, 0.027, 0.078;  I (A): -6.64, -6.64, -6.64, 15.25, 2.10
+ Bus 6:  Z−1 (1/Ω): 0.071, 0.071, 0.089, 0.102, 0.102;  I (A): 4.04, 4.04, 7.85, -9.19, -9.19
+ Bus 7:  Z−1 (1/Ω): 0.029, 0.070, 0.070, 0.079, 0.079;  I (A): 9.04, 0.89, 0.89, 0.58, 0.58
+ Bus 8:  Z−1 (1/Ω): 0.075, 0.075, 0.057, 0.057, 0.111;  I (A): -16.85, -16.85, 20.55, 20.55, -14.31
+ Bus 9:  Z−1 (1/Ω): 0.105, 0.102, 0.061, 0.036, 0.036;  I (A): 10.71, -19.75, 9.53, -0.05, -0.05
+ Bus 10: Z−1 (1/Ω): 0.042, 0.091, 0.091, 0.088, 0.088;  I (A): 2.53, 2.03, 2.03, 8.34, 8.34
2646
+ [4] V. Nasirian, S. Moayedi, A. Davoudi, and F. L. Lewis, “Distrib-
2647
+ uted cooperative control of DC microgrids,” IEEE Trans. Power
2648
+ Electron., vol. 30, no. 4, pp. 2288–2303, 2015.
2649
+ [5] M. Tucci, L. Meng, J. M. Guerrero, and G. Ferrari-Trecate,
2650
+ “Stable current sharing and voltage balancing in DC mi-
2651
+ crogrids: A consensus-based secondary control layer,” Automat-
2652
+ ica, vol. 95, pp. 1–13, 2018.
2653
+ [6] J. Zhao and F. Dörfler, “Distributed control and optimization
2654
+ in dc microgrids,” Automatica, vol. 61, pp. 18–26, 2015.
2655
+ [7] T. Dragičević, X. Lu, J. C. Vasquez, and J. M. Guerrero,
2656
+ “DC microgrids—part i: A review of control strategies and
2657
+ stabilization techniques,” IEEE Trans. Power Electron., vol. 31,
2658
+ no. 7, pp. 4876–4891, 2016.
2659
+ [8] J. Kumar, A. Agarwal, and V. Agarwal, “A review on overall
2660
+ control of dc microgrids,” J. of Energy Storage, vol. 21, pp. 113–
2661
+ 138, 2019.
2662
+ [9] M. Tucci, S. Riverso, J. C. Vasquez, J. M. Guerrero, and
2663
+ G. Ferrari-Trecate, “A
2664
+ decentralized
2665
+ scalable approach to
2666
+ voltage control of DC islanded microgrids,” IEEE Trans. Con-
2667
+ trol Syst. Technol., vol. 24, no. 6, pp. 1965–1979, 2016.
2668
+ [10] F. Strehle, M. Pfeifer, A. J. Malan, S. Krebs, and S. Hohmann,
2669
+ “A scalable port-Hamiltonian approach to plug-and-play voltage
2670
+ stabilization in DC microgrids,” in 2020 IEEE Conf. Control
2671
+ Technol. and Applications, 2020, pp. 787–794.
2672
+ [11] M. Cucuzzella, K. C. Kosaraju, and J. M. A. Scherpen, “Voltage
2673
+ control of DC microgrids: Robustness for unknown ZIP-loads,”
2674
+ IEEE Control Syst. Lett., vol. 7, pp. 139–144, 2023.
2675
+ [12] S. Trip, M. Cucuzzella, X. Cheng, and J. Scherpen, “Distributed
2676
+ averaging control for voltage regulation and current sharing in
2677
+ DC microgrids,” IEEE Control Syst. Lett., vol. 3, no. 1, pp. 174–
2678
+ 179, 2019.
2679
+ [13] M. Cucuzzella, S. Trip, C. De Persis, X. Cheng, A. Ferrara, and
2680
+ A. van der Schaft, “A robust consensus algorithm for current
2681
+ sharing and voltage regulation in DC microgrids,” IEEE Trans.
2682
+ Control Syst. Technol., vol. 27, no. 4, pp. 1583–1595, 2019.
2683
+ [14] M. S. Sadabadi, S. Sahoo, and F. Blaabjerg, “Stability-oriented
2684
+ design of cyberattack-resilient controllers for cooperative DC
2685
+ microgrids,” IEEE Trans. Power Electron., vol. 37, no. 2, pp.
2686
+ 1310–1321, 2022.
2687
+ [15] R. Han, L. Meng, J. M. Guerrero, and J. C. Vasquez, “Dis-
2688
+ tributed nonlinear control with event-triggered communication
2689
+ to achieve current-sharing and voltage regulation in DC mi-
2690
+ crogrids,” IEEE Trans. Power Electron., vol. 33, no. 7, pp. 6416–
2691
+ 6433, 2018.
2692
+ [16] P. Nahata and G. Ferrari-Trecate, “On existence of equilibria,
2693
+
2694
+ 16
2695
+ voltage balancing, and current sharing in consensus-based DC
2696
+ microgrids,” in Proc. Eur. Control Conf. (ECC), 2020, pp. 1216–
2697
+ 1223.
2698
+ [17] P. Nahata, M. S. Turan, and G. Ferrari-Trecate, “Consensus-
2699
+ based current sharing and voltage balancing in dc microgrids
2700
+ with exponential loads,” IEEE Trans. Control Syst. Technol.,
2701
+ vol. 30, no. 4, pp. 1668–1680, 2022.
2702
+ [18] C. De Persis, E. Weitenberg, and F. Dörfler, “A power consensus
2703
+ algorithm for DC microgrids,” IFAC-PapersOnLine, vol. 50,
2704
+ no. 1, pp. 10 009–10 014, 2017, 20th IFAC World Congress.
2705
+ [19] B. Fan, S. Guo, J. Peng, Q. Yang, W. Liu, and L. Liu, “A
2706
+ consensus-based algorithm for power sharing and voltage reg-
2707
+ ulation in dc microgrids,” IEEE Trans. Ind. Inform., vol. 16,
2708
+ no. 6, pp. 3987–3996, 2020.
2709
+ [20] M. Cucuzzella, K. C. Kosaraju, and J. M. A. Scherpen, “Dis-
2710
+ tributed passivity-based control of DC microgrids,” in American
2711
+ Control Conf. (ACC), 2019, pp. 652–657.
2712
+ [21] F. Dörfler and F. Bullo, “Kron reduction of graphs with ap-
2713
+ plications to electrical networks,” IEEE Trans. Circuits Syst.,
2714
+ vol. 60, no. 1, pp. 150–163, 2013.
2715
+ [22] W. Chen, D. Wang, J. Liu, Y. Chen, S. Z. Khong, T. Başar,
2716
+ K. H. Johansson, and L. Qiu, “On spectral properties of signed
2717
+ laplacians with connections to eventual positivity,” IEEE Trans.
2718
+ Autom. Control, vol. 66, no. 5, pp. 2177–2190, 2021.
2719
+ [23] A. J. Malan, M. Pfeifer, and S. Hohmann, “Distributed coordin-
2720
+ ation of physically-interconnected multi-agent systems with ac-
2721
+ tuated and unactuated agents,” Eur. J. Control, p. 100673,
2722
+ 2022.
2723
+ [24] A. J. van der Schaft, L2-Gain and Passivity Techniques in
2724
+ Nonlinear Control, 3rd ed.
2725
+ Cham, Switzerland: Springer, 2017.
2726
+ [25] M. Arcak and E. D. Sontag, “Diagonal stability of a class of
2727
+ cyclic systems and its connection with the secant criterion,”
2728
+ Automatica, vol. 42, no. 9, pp. 1531–1537, 2006.
2729
+ [26] H. K. Khalil, Nonlinear Systems, 3rd ed.
2730
+ Upper Saddle River,
2731
+ NJ: Prentice Hall, 2002.
2732
+ [27] G. H. H. Hines, M. Arcak, and A. K. Packard, “Equilibrium-
2733
+ independent passivity: A new definition and numerical certific-
2734
+ ation,” Automatica, vol. 47, no. 9, pp. 1949–1956, 2011.
2735
+ [28] M. Arcak, C. Meissen, and A. Packard, Networks of Dissipative
2736
+ Systems: Compositional Certification of Stability, Performance,
2737
+ and Safety, ser. (SpringerBriefs in Control, Automation and
2738
+ Robotics).
2739
+ New York, NY, USA: Springer, 2016.
2740
+ [29] A. J. Malan, P. Jané-Soneira, and S. Hohmann, “Constructive
2741
+ analysis and design of interconnected krasovskii passive and
2742
+ quadratic dissipative systems,” in Proc. 61th IEEE Conf. Decis.
2743
+ Control (CDC), 2022.
2744
+ [30] P. Moylan and D. Hill, “Stability criteria for large-scale sys-
2745
+ tems,” IEEE Trans. Autom. Control, vol. 23, no. 2, pp. 143–149,
2746
+ 1978.
2747
+ [31] M. Benzi, G. H. Golub, and J. Liesen, “Numerical solution of
2748
+ saddle point problems,” Acta Numerica, vol. 14, p. 1–137, 2005.
2749
+ [32] F. Strehle, A. J. Malan, S. Krebs, and S. Hohmann, “Passivity
2750
+ conditions for plug-and-play operation of nonlinear static AC
2751
+ loads,” IFAC-PapersOnLine, vol. 53, no. 2, pp. 12 237–12 243,
2752
+ 2020, 21st IFAC World Congress.
2753
+ [33] J. Machowski, J. W. Bialek, and J. R. Bumby, Power System
2754
+ Dynamics: Stability and Control, 2nd ed.
2755
+ Chichester, United
2756
+ Kingdom: John Wiley & Sons, Ltd., 2008.
2757
+ [34] R. A. Freeman, P. Yang, and K. M. Lynch, “Stability and con-
2758
+ vergence properties of dynamic average consensus estimators,”
2759
+ in Proc. 45th IEEE Conf. Decis. Control (CDC), 2006, pp. 338–
2760
+ 343.
2761
+ [35] E. Weitenberg, Y. Jiang, C. Zhao, E. Mallada, F. Dörfler, and
2762
+ C. De Persis, “Robust decentralized frequency control: A leaky
2763
+ integrator approach,” in Proc. Eur. Control Conf. (ECC), 2018,
2764
+ pp. 764–769.
2765
+ [36] T. Raff, C. Ebenbauer, and P. Allgöwer, Nonlinear Model Pre-
2766
+ dictive Control: A Passivity-Based Approach.
2767
+ Berlin, Heidel-
2768
+ berg: Springer, 2007, pp. 151–162.
2769
+
dNFRT4oBgHgl3EQfTjeD/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
dtE3T4oBgHgl3EQfegpr/content/tmp_files/2301.04544v1.pdf.txt ADDED
@@ -0,0 +1,1004 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ arXiv:2301.04544v1 [cs.GT] 11 Jan 2023
2
+ Optimal Impartial Correspondences
3
+ Javier Cembrano∗
4
+ Felix Fischer†
5
+ Max Klimm‡
6
+ Abstract
7
+ We study mechanisms that select a subset of the vertex set of a directed graph in order to
8
+ maximize the minimum indegree of any selected vertex, subject to an impartiality constraint
9
+ that the selection of a particular vertex is independent of the outgoing edges of that vertex.
10
+ For graphs with maximum outdegree d, we give a mechanism that selects at most d + 1
11
+ vertices and only selects vertices whose indegree is at least the maximum indegree in the
12
+ graph minus one. We then show that this is best possible in the sense that no impartial
13
+ mechanism can only select vertices with maximum degree, even without any restriction
14
+ on the number of selected vertices. We finally obtain the following trade-off between the
15
+ maximum number of vertices selected and the minimum indegree of any selected vertex:
16
+ when selecting at most k vertices out of n, it is possible to only select vertices whose indegree
17
+ is at least the maximum indegree minus ⌊(n − 2)/(k − 1)⌋ + 1.
18
+ 1
19
+ Introduction
20
+ Impartial selection is the problem of selecting vertices with large indegree in a directed graph,
21
+ in such a way that the selection of a particular vertex is independent of the outgoing edges of
22
+ that vertex. The problem models a situation where agents nominate one another for selection
23
+ and are willing to offer their true opinion on other agents as long as this does not affect their
24
+ own chance of being selected.
25
+ The selection of a single vertex is governed by strong impossibility results. For graphs with
26
+ maximum outdegree one, corresponding to situations where each agent submits a single nomi-
27
+ nation, every impartial selection rule violates one of two basic axioms [11] and as a consequence
28
+ must fail to provide a non-trivial multiplicative approximation to the maximum indegree. For
29
+ graphs with arbitrary outdegrees, corresponding to situations where each agent can submit mul-
30
+ tiple nominations, impartial rules violate an even weaker axiom and cannot provide a non-trivial
31
+ approximation in a multiplicative or additive sense [1, 8]. These impossibilities largely remain in
32
+ place if rather than a single vertex we want to select any fixed number of vertices, but positive
33
+ results can be obtained if we relax the requirement that the same number of vertices must be
34
+ selected in every graph [4, 18].
35
+ From a practical point of view, the need for such a relaxation should not necessarily be a
36
+ cause for concern. Indeed, situations in the real world to which impartial selection is relevant
37
+ often allow for a certain degree of flexibility in the number of selected agents. The exact number
38
+ of papers accepted to an academic conference is usually not fixed in advance but depends on
39
+ the number and quality of submissions. Best paper awards at conferences are often given in
40
+ overlapping categories, and some awards may only be given if this is warranted by the field of
41
+ candidates. The Fields medal is awarded every four years to two, three, or four mathematicians
42
+ under the age of 40. Examples at the more extreme end of the spectrum of flexibility include
43
+ the award of job titles such as vice president or deputy vice-principal. Such titles can often be
44
+ ∗Institut für Mathematik, Technische Universität Berlin, Germany
45
+ †School of Mathematical Sciences, Queen Mary University of London, UK
46
+ ‡Institut für Mathematik, Technische Universität Berlin, Germany
47
+ 1
48
+
49
+ given to a large number of individuals at a negligible cost per individual, but should only be
50
+ given to qualified individuals so as not to devalue the title.
51
+ Tamura and Ohseto [18] specifically studied what they call nomination correspondences,
52
+ i.e., rules that may select an arbitrary set of vertices in any graph. For graphs with maximum
53
+ outdegree one a particular such rule, plurality with runners-up, satisfies impartiality and ap-
54
+ propriate versions of the two axioms of Holzman and Moulin [11]. The rule selects any vertex
55
+ with maximum indegree; if there is a unique such vertex, any vertex whose indegree is smaller
56
+ by one and whose outgoing edge goes to the vertex with maximum indegree is selected as well.
57
+ An appropriate measure for the quality of rules that select varying numbers of vertices is the
58
+ difference in the worst case between the best vertex and the worst selected vertex, and we can
59
+ call a rule α-min-additive if the maximum difference, taken over all graphs, between these two
60
+ quantities is at most α. In this terminology, plurality with runners-up is 1-min-additive.
61
+ As Tamura and Ohseto point out, it may be desirable in practice to ensure that the maximum
62
+ number of vertices selected is not too large, a property that plurality with runners-up clearly
63
+ fails. It is therefore interesting to ask whether there exist rules that are α-min-additive and
64
+ never select more than k vertices, for some fixed α and k.
65
+ For graphs with outdegree one,
66
+ Tamura and Ohseto answer this question in the affirmative: a variant of plurality with runners-
67
+ up that breaks ties according to a fixed ordering of the vertices remains 1-min-additive but never
68
+ selects more than two vertices.
69
+ Our Contribution
70
+ Our first result provides a generalization of the result of Tamura and Ohseto
71
+ to graphs with larger outdegrees: for graphs with maximum outdegree d, it is possible to achieve
72
+ 1-min-additivity while selecting at most d+1 vertices. For the particular case of graphs with un-
73
+ bounded outdegrees we obtain a slight improvement, by guaranteeing 1-min-additivity without
74
+ ever selecting all vertices. Our second result establishes that 1-min-additivity is best possible,
75
+ thus ruling out the existence of impartial mechanisms that only select vertices with maximum in-
76
+ degree. This holds even when no restrictions are imposed on the number of selected vertices, and
77
+ is shown alongside analogous impossibility results concerning the maximization of the median or
78
+ mean indegree of the selected vertices instead of their minimum indegree. Our third result pro-
79
+ vides a trade-off between the maximum number of vertices selected, where smaller is better, and
80
+ the minimum indegree of any selected vertex, where larger is better: if we are allowed to select
81
+ at most k vertices out of n, we can guarantee α-min-additivity for α = ⌊(n−2)/(k−1)⌋+1. This
82
+ is achieved by removing a subset of the edges from the graph before plurality with runners-up
83
+ is applied, in order to guarantee impartiality while selecting fewer vertices. We do not know
84
+ whether this last result is tight and leave open the interesting question for the optimal trade-off
85
+ between the number and quality of selected vertices.
86
+ Related Work
87
+ Impartiality as a property of an economic mechanism was introduced by
88
+ de Clippel et al. [9], and first applied to the selection of vertices in a directed graph by Alon et al.
89
+ [1] and Holzman and Moulin [11]. Whereas Holzman and Moulin gave axiomatic characteriza-
90
+ tions for mechanisms selecting a single vertex when all outdegrees are equal to one, Alon et al.
91
+ studied the ability of impartial mechanisms to approximate the maximum indegree for any fixed
92
+ number of vertices when there are no limitations on outdegrees.
93
+ Both sets of authors obtained strong impossibility results, which a significant amount of
94
+ follow-up work has since sought to overcome. Randomized mechanisms providing non-trivial
95
+ multiplicative guarantees had already been proposed by Alon et al., and Fischer and Klimm [10]
96
+ subsequently achieved the best possible such guarantee for the selection of one vertex. Starting
97
+ from the observation that worst-case instances for randomized mechanisms have small indegrees,
98
+ Bousquet et al. [5] developed a mechanism that is asymptotically optimal as the maximum
99
+ indegree grows, and Caragiannis et al. [6, 7] initiated the study of mechanisms providing additive
100
+ rather than multiplicative guarantees. Cembrano et al. [8] subsequently identified a deterministic
101
+ 2
102
+
103
+ mechanism that provides non-trivial additive guarantees whenever the maximum outdegree is
104
+ bounded and established that no such guarantees can be obtained with unbounded outdegrees.
105
+ Randomized mechanisms have been also studied from an axiomatic point of view by Mackenzie
106
+ [14, 15].
107
+ Bjelde et al. [4] gave randomized mechanisms with improved multiplicative guarantees for the
108
+ selection of more than one vertex and observed that when selecting at most k vertices rather than
109
+ exactly k, deterministic mechanisms can in fact achieve non-trivial guarantees. An axiomatic
110
+ study of Tamura and Ohseto [18] for the outdegree-one case came to the same conclusion:
111
+ when allowing for the selection of a varying number of vertices, the impossibility result of
112
+ Holzman and Moulin no longer holds.
113
+ Tamura [17] subsequently characterized a mechanism
114
+ proposed by Tamura and Ohseto, which in some cases selects all vertices, as the unique minimal
115
+ mechanism satisfying impartiality, anonymity, symmetry, and monotonicity.
116
+ Impartial mechanisms have finally been proposed for various problems other than selection,
117
+ including peer review [2, 13, 16, 20], rank aggregation [12], progeny maximization [3, 21], and
118
+ network centralities [19].
119
+ 2
120
+ Preliminaries
121
+ For n ∈ N, let [n] = {1, 2, . . . , n}, and let
122
+ Gn = {(V, E) : V = [n], E ⊆ (V × V) \ ∪_{v∈V} {(v, v)}}
+ be the set of directed graphs with n vertices and no loops. Let G = ∪_{n∈N} Gn. For G = (V, E) ∈ G
131
+ and v ∈ V , let N +(v, G) = {u ∈ V : (v, u) ∈ E} be the out-neighborhood and N −(v, G) =
132
+ {u ∈ V : (u, v) ∈ E} the in-neighborhood of v in G. Let δ+(v, G) = |N +(v, G)| and δ−(v, G) =
133
+ |N −(v, G)| denote the outdegree and indegree of v in G, and ∆(G) = maxv∈V δ−(v, G) the
134
+ maximum indegree of any vertex in G.
135
+ When the graph is clear from the context, we will
136
+ sometimes drop G from the notation and write N +(v), N −(v), δ+(v), δ−(v), and ∆.
137
+ Let
138
+ top(G) = max{v ∈ V : δ−(v) = ∆(G)} denote the vertex of G with the largest index among
139
+ those with maximum indegree.
140
+ For n ∈ N and d ∈ [n − 1], let Gn(d) = {(V, E) ∈ Gn :
141
+ δ+(v) ≤ d for every v ∈ V } be the set of graphs in Gn with maximum outdegree at most d, and
142
+ G(d) = �
143
+ n∈N Gn(d).
144
+ A k-selection mechanism is then given by a family of functions f : Gn → 2[n], one for
145
+ each n ∈ N, mapping each graph to a subset of its vertices, where we require that |f(G)| ≤ k
146
+ for all G ∈ G. In a slight abuse of notation, we will use f to refer to both the mechanism
147
+ and to individual functions of the family. Given G = (V, E) ∈ G and v ∈ V , let Nv(G) =
148
+ {(V, E′) ∈ G :
149
+ E \ ({v} × V ) = E′ \ ({v} × V )} be the set neighboring graphs of G with
150
+ respect to v, in the sense that they can be obtained from G by changing the outgoing edges
151
+ of v. Mechanism f is impartial on G′ ⊆ G if on this set of graphs the outgoing edges of a
152
+ vertex have no influence on its selection, i.e., if for every graph G = (V, E) ∈ G′, v ∈ V , and
153
+ G′ ∈ Nv(G), it holds that f(G) ∩ {v} = f(G′) ∩ {v}. Given a k-selection mechanism f and
154
+ an aggregator function σ : 2R → R such that σ(∅) = 0 and, for every S ⊆ R with |S| ≥ 1,
155
+ min{x ∈ S} ≤ σ(S) ≤ max{x ∈ S}, we say that f is α-σ-additive on G′ ⊆ G, for α ≥ 0, if for
156
+ every graph in G′ the function σ evaluated on the choice of f differs from the maximum indegree
157
+ by at most α, i.e., if
158
+ sup
159
+ G∈G′
160
+
161
+ ∆(G) − σ
162
+
163
+ {δ−(v, G)}v∈f(G)
164
+ ��
165
+ ≤ α.
166
+ We will specifically be interested in the cases where σ is the minimum, the median, and the mean,
167
+ and respectively call a mechanism α-min-additive, α-median-additive, and α-mean-additive in
168
+ these cases.
169
+ 3
170
+
171
+ Algorithm 1: Plurality with runners-up
172
+ Input: Digraph G = (V, E) ∈ Gn(1)
173
+ Output: Set S ⊆ V of selected vertices
174
+ Let S = {v ∈ V : δ−(v) = ∆(G)};
175
+ if S = {v} for some v ∈ V then
176
+ S ←− S ∪ {u ∈ V : δ−(u) = ∆(G) − 1 and (u, v) ∈ E}
177
+ end
178
+ Return S
179
+ 3
180
+ Plurality with Runners-up
181
+ Focusing on the case with maximum outdegree one, Tamura and Ohseto [18] proposed a mech-
182
+ anism they called plurality with runners-up.
183
+ The mechanism, which we describe formally in
184
+ Algorithm 1, selects all vertices with maximum indegree; if there is a unique such vertex, then
185
+ any vertex with an outgoing edge to that vertex whose indegree is smaller by one is selected as
186
+ well. The idea behind this mechanism is that vertices in the latter category would be among
187
+ those with maximum degree if their outgoing edge was deleted, and thus any impartial mecha-
188
+ nism seeking to select the vertices with maximum degree would also have to select those vertices.
189
+ Plurality with runners-up is impartial on G(1), and in any graph with n vertices selects between
190
+ 1 and n vertices whose degree is equal to the maximum degree or the maximum degree minus
191
+ one. It is thus an impartial and 1-min-additive n-selection mechanism on Gn(1) for every n ∈ N.
192
+ It is natural to ask whether a similar additive guarantee can be obtained for more general set-
193
+ tings. In this section, we answer this question in the affirmative, and in particular study for
194
+ which values of n, k, and d there exists an impartial and 1-min-additive k-selection mechanism
195
+ on Gn(d). We will see later, in Section 4, that 1-min-additivity is in fact best possible for all
196
+ cases covered by our result, with the exception of the boundary case where n = 2.
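+ As a concrete illustration, here is a minimal Python sketch of plurality with runners-up, our own hypothetical rendering of Algorithm 1; it assumes the graph is given as a set of edge pairs over [n].
+ def plurality_with_runners_up(n, edges):
+     # Select all maximum-indegree vertices; if the maximum is attained by a
+     # unique vertex v, additionally select every vertex u with indegree
+     # Delta(G) - 1 that has an outgoing edge (u, v).
+     indeg = {v: sum(1 for (_, w) in edges if w == v) for v in range(1, n + 1)}
+     delta = max(indeg.values())
+     selected = {v for v in range(1, n + 1) if indeg[v] == delta}
+     if len(selected) == 1:
+         (v,) = tuple(selected)
+         selected |= {u for u in range(1, n + 1)
+                      if indeg[u] == delta - 1 and (u, v) in edges}
+     return selected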
197
+ While Tamura and Ohseto do not limit the maximum number of selected vertices, they
198
+ discuss briefly a modification of their mechanism that retains impartiality and 1-min-additivity
199
+ but selects at most 2 vertices. Instead of all vertices with maximum indegree, the modified
200
+ mechanism breaks ties in favor of a single maximum-degree vertex using a fixed ordering of the
201
+ vertices. In order to guarantee impartiality, the modified mechanism then also selects any vertex
202
+ that would be selected in the graph obtained by deleting the outgoing edge of that vertex. The
203
+ assumption that every vertex has at most one outgoing edge means that at most one additional
204
+ vertex is selected. There thus exists a 1-min-additive k-selection mechanism on G(1) for every
205
+ k ≥ 2.
206
+ Our first result generalizes this mechanism to settings with arbitrary outdegrees, as long
207
+ as the maximum number of selected vertices is large enough. To this end we show that when
208
+ the maximum outdegree is d, to achieve impartiality, at most d vertices have to be selected in
209
+ addition to the one with maximum indegree and highest priority.1 We formally describe the
210
+ resulting mechanism in Algorithm 2, and will refer to it as asymmetric plurality with runners-up
211
+ and denote its output on graph G by P(G). We obtain the following theorem, which generalizes
212
+ the known result for the outdegree-one case.
213
+ Theorem 1. For every n ∈ N, d ∈ [n − 1], and k ∈ {d + 1, . . . , n}, there exists an impartial
214
+ and 1-min-additive k-selection mechanism on Gn(d).
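+ For concreteness, a brief Python sketch of asymmetric plurality with runners-up follows (ours, not from the paper; it assumes the same edge-set representation as in the earlier sketches). It implements the rule formalized in Algorithm 2 below: v is selected exactly when v = top(Gv), where Gv is obtained from G by deleting the outgoing edges of v.
+ def asymmetric_plurality_with_runners_up(n, edges):
+     def top(es):
+         indeg = {v: sum(1 for (_, w) in es if w == v) for v in range(1, n + 1)}
+         delta = max(indeg.values())
+         return max(v for v in range(1, n + 1) if indeg[v] == delta)
+     selected = set()
+     for v in range(1, n + 1):
+         g_v = {(u, w) for (u, w) in edges if u != v}  # remove outgoing edges of v
+         if top(g_v) == v:
+             selected.add(v)
+     return selected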
215
+ We will be interested in the following in comparing vertices both according to their indegree
216
+ and to their index, and we will use regular inequality symbols, as well as the operators max and
217
+ 1In this mechanism and wherever ties are broken in the rest of the paper, we break ties in favor of greater
218
+ index, so top(G) is the vertex with maximum indegree and highest priority in graph G. Naturally, any other
219
+ deterministic tie-breaking rule could be used instead.
220
+ 4
221
+
222
+ Algorithm 2: Asymmetric plurality with runners-up P(G)
223
+ Input: Digraph G = (V, E) ∈ Gn
224
+ Output: Set S ⊆ V of selected vertices
225
+ Let S = ∅;
226
+ for v ∈ V do
227
+ Let Gv = (V, E \ ({v} × V ));
228
+ if top(Gv) = v then
229
+ S ←− S ∪ {v}
230
+ end
231
+ end
232
+ Return S
233
+ min, to denote the lexicographic order among pairs of the form (δ−(v), v). The following lemma
234
+ characterizes the structure of the set of vertices selected by Algorithm 2, and provides the main
235
+ technical ingredient to the proof of Theorem 1.
236
+ Lemma 1. Let G = (V, E) ∈ G and v ∈ V . Then, v ∈ P(G) if and only if
237
+ (a) for every w ∈ V with (δ−(w), w) > (δ−(v), v) it holds (v, w) ∈ E; and
238
+ (b) one of the following holds:
239
+ (i) δ−(v) = ∆(G); or
240
+ (ii) δ−(v) = ∆(G) − 1 and v > w for every w ∈ V with δ−(w) = ∆(G).
241
+ Proof. We first show that, if v ∈ P(G) for a given graph G, then (a) and (b) follow.
242
+ Let
243
+ G = (V, E) ∈ G, and let v ∈ P(G). To see (a), suppose there is w ∈ V with (δ−(w, G), w) >
244
+ (δ−(v, G), v).
245
+ Since v ∈ P(G), we have v = top(Gv) with Gv = (V, E \ ({v} × V )).
246
+ This
247
+ implies (δ−(v, Gv), v) > (δ−(w, Gv), w) and therefore δ−(w, G) > δ−(w, Gv), because δ−(v, G) =
248
+ δ−(v, Gv). Since G and Gv only differ in the outgoing edges of v, we conclude that (v, w) ∈ E.
249
+ To prove (b), we note that for every w ∈ V we have
250
+ (δ−(v, G), v) = (δ−(v, Gv), v) > (δ−(w, Gv), w) ≥ (δ−(w, G) − 1, w),
251
+ (1)
252
+ where the last inequality comes from the fact that each vertex has at most one incoming edge
253
+ from v. If there is no w ∈ V \ {v} with δ−(w) = ∆(G), the maximum indegree must be that of
254
+ v, so δ−(v) = ∆(G) and (i) follows. Otherwise, for each w ∈ V \ {v} with δ−(w) = ∆(G), (1)
255
+ yields (δ−(v, G), v) > (∆(G) − 1, w). We conclude that either δ−(v, G) > ∆(G) − 1, in which
256
+ case (i) holds, or both δ−(v) = ∆(G) − 1 and v > w, which implies (ii).
257
+ We now prove the other direction.
258
+ Let G = (V, E) ∈ G and v ∈ V such that both (a)
259
+ and (b) hold. Let Gv = (V, E \ ({v} × V )). We have to show that top(Gv) = v, i.e., that
260
+ for every w ∈ V \ {v}, (δ−(v, Gv), v) > (δ−(w, Gv), w).
261
+ Let w be a vertex in V \ {v}.
262
+ If
263
+ (δ−(v, G), v) > (δ−(w, G), w), we can conclude immediately since δ−(v, Gv) = δ−(v, G) and
264
+ δ−(w, Gv) ≤ δ−(w, G). Otherwise, we know from (a) that (v, w) ∈ E and thus δ−(w, Gv) =
265
+ δ−(w, G) − 1. If v satisfies (i), this yields
266
+ δ−(v, Gv) = δ−(v, G) = ∆(G) ≥ δ−(w, G) = δ−(w, Gv) + 1,
267
+ so (δ−(v, Gv), v) > (δ−(w, Gv), w). On the other hand, if v satisfies (ii), then
268
+ δ−(v, Gv) = δ−(v, G) = ∆(G) − 1 ≥ δ−(w, G) − 1 = δ−(w, Gv),
269
+ and v > w implies (δ−(v, Gv), v) > (δ−(w, Gv), w) as well.
270
+ 5
271
280
+ Figure 1: Example of a set of vertices selected by Algorithm 2. In this illustration and throughout
281
+ the paper, vertices are arranged vertically according to indegree and horizontally according to
282
+ index, so that vertices on the left are favored in case of ties.
283
+ The vertices selected by the
284
+ mechanism are drawn in white, those not selected in black. Vertices with indegree below ∆ − 1,
285
+ as well as edges incident to such vertices, are not shown. Denoting the graph as G = (V, E),
286
+ and letting Gv = (V, E \ ({v} × V )) for each vertex v, the selected vertices v are those for which
287
+ top(Gv) = v. Specifically, vertices 2, 3, and 6 are not selected because top(G2) = 4, top(G3) = 4,
288
+ and top(G6) = 1.
289
+ Observe that Lemma 1 implies in particular that top(G) ∈ P(G) for every graph G. Figure 1
290
+ provides an example of the characterization given by Lemma 1, in terms of indegrees, tie-
291
+ breaking order, and edges among selected vertices.
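+ The characterization can also be checked programmatically; the hypothetical sketch below evaluates conditions (a) and (b) of Lemma 1 for a given vertex, and on small graphs its output can be compared with the direct definition v ∈ P(G) ⇔ top(Gv) = v.
+ def lemma1_holds(n, edges, v):
+     # Conditions (a) and (b) of Lemma 1 for vertex v in G = ([n], edges).
+     indeg = {u: sum(1 for (_, w) in edges if w == u) for u in range(1, n + 1)}
+     delta = max(indeg.values())
+     a = all((v, w) in edges for w in range(1, n + 1)
+             if (indeg[w], w) > (indeg[v], v))
+     b = (indeg[v] == delta
+          or (indeg[v] == delta - 1
+              and all(v > w for w in range(1, n + 1) if indeg[w] == delta)))
+     return a and b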
292
+ We are now ready to prove Theorem 1.
293
+ Proof of Theorem 1. We show that for every n ∈ N and d ∈ [n − 1], asymmetric plurality with
294
+ runners-up is impartial and 1-min-additive on Gn(d), and that for every G = (V, E) ∈ Gn(d), it
295
+ selects at most d+1 vertices. If this is the case, then for every k ∈ {d+1, . . . , n} the mechanism
296
+ would satisfy the statement of the theorem. Therefore, let n and d be as mentioned.
297
+ Impartiality follows from the definition of the mechanism, because the outgoing edges of a
298
+ vertex are not taken into account when deciding whether the vertex is part of the selected
299
+ set or not. If we let G = (V, E), v ∈ V , and G′ = (V, E′) ∈ Nv(G), then the graphs Gv and
300
+ G′_v constructed when running the mechanism with each of these graphs G and G′ as an input,
+ respectively, are the same because by definition of Nv(G) we have E \ ({v} × V ) = E′ \ ({v} × V ).
+ Since v ∈ P(G) ⇔ top(Gv) = v, and v ∈ P(G′) ⇔ top(G′_v) = v, we conclude v ∈ P(G) ⇔ v ∈ P(G′).
306
+ To see that the mechanism is 1-min-additive, let G ∈ Gn(d) and first note that P(G) ̸= ∅
307
+ since Lemma 1 implies that top(G) ∈ P(G). From this lemma we also know that for every
308
+ v ∈ P(G), δ−(v) ≥ ∆(G) − 1. We conclude that min{{δ−(v)}v∈P(G)} ≥ ∆(G) − 1, and since
309
+ this holds for every G ∈ Gn(d), the mechanism is 1-min-additive.
310
+ Finally, let G = (V, E) ∈ Gn(d), and suppose that |P(G)| > d + 1.
311
+ If we denote vL =
312
+ argminv∈P(G){(δ−(v), v)}, from Lemma 1 we know that (vL, w) ∈ E for every w ∈ V with
313
+ (δ−(w), w) > (δ−(vL), vL), thus δ+(vL) ≥ |P(G)| − 1 > d, a contradiction. We conclude that
314
+ |P(G)| ≤ d + 1.
315
+ The following result, concerning mechanisms that may select an arbitrary number of vertices,
316
+ follows immediately from Theorem 1.
317
+ Corollary 1. For every n ∈ N, there exists an impartial and 1-min-additive n-selection mecha-
318
+ nism on Gn.
319
+ On Gn, i.e., in the case of unbounded outdegrees, this result can in fact be improved slightly
320
+ to guarantee 1-min-additivity while selecting only at most n − 1 vertices. The improvement
321
+ is achieved by a more intricate version of asymmetric plurality with runners-up, which we call
322
+ asymmetric plurality with runners-up and pivotal vertices. We formally describe this mechanism
323
+ 6
324
+
325
+ Algorithm 3: Asymmetric plurality with runners-up and pivotal vertices PP(G)
326
+ Input: Digraph G = (V, E) ∈ Gn
327
+ Output: Set S ⊆ V of selected vertices with |S| ≤ n − 1
328
+ Let S ←− ∅;
329
+ for u ∈ P(G) do
330
+ if for every v ∈ P(G) \ {u} there exists Guv ∈ Nu(G) such that v /∈ P(Guv) then
331
+ S ←− S ∪ {u}
332
+ end
333
+ end
334
+ Return S
335
+ in Algorithm 3 and denote its output for graph G by PP (G).
336
+ Given a graph G = (V, E),
337
+ call a vertex u ∈ P(G) pivotal for v ∈ P(G) if there exists a graph Guv ∈ Nu(G) such that
338
+ v /∈ P(Guv), i.e., if the outgoing edges of u can be changed in such a way that v is no longer
339
+ selected by asymmetric plurality with runners-up. Asymmetric plurality with runners-up and
340
+ pivotal vertices then selects every vertex in P(G) that is pivotal for every other vertex in P(G).
341
+ The mechanism turns out to inherit impartiality and 1-min-additivity, and to never select all
342
+ vertices.
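+ Since the pivotality condition quantifies over all graphs in Nu(G), a direct implementation must range over all possible sets of outgoing edges of u; the hypothetical Python sketch below (ours) does this by brute force and is therefore practical only for small n.
+ from itertools import chain, combinations
+ def pp_selection(n, edges):
+     def top(es):
+         indeg = {v: sum(1 for (_, w) in es if w == v) for v in range(1, n + 1)}
+         delta = max(indeg.values())
+         return max(v for v in range(1, n + 1) if indeg[v] == delta)
+     def P(es):
+         return {v for v in range(1, n + 1)
+                 if top({(u, w) for (u, w) in es if u != v}) == v}
+     def neighbours(u):
+         # all graphs obtained by replacing the outgoing edges of u
+         base = {(a, b) for (a, b) in edges if a != u}
+         others = [w for w in range(1, n + 1) if w != u]
+         for out in chain.from_iterable(combinations(others, r)
+                                        for r in range(len(others) + 1)):
+             yield base | {(u, w) for w in out}
+     p_g = P(edges)
+     return {u for u in p_g
+             if all(any(v not in P(g) for g in neighbours(u)) for v in p_g - {u})}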
343
+ Theorem 2. For every n ∈ N and k ∈ {n − 1, n}, there exists an impartial and 1-min-additive
344
+ k-selection mechanism on Gn.
345
+ Proof. We show that for every n ∈ N, asymmetric plurality with runners-up and pivotal vertices
346
+ is impartial and 1-min-additive on Gn and that for every G = (V, E) ∈ Gn, it selects at most
347
+ n − 1 vertices. Let n ∈ N be an arbitrary value.
348
+ To see that the mechanism is impartial, let G = (V, E) ∈ Gn, u ∈ PP (G), and G′ = (V, E′) ∈
349
+ Nu(G). We show in the following that u ∈ PP (G′), and since the graphs G and G′ are chosen
350
+ arbitrarily, their roles can be inverted and this is enough to conclude that the mechanism is
351
+ impartial. We first note that u ∈ P(G) because PP (G) ⊆ P(G), thus impartiality of asymmetric
352
+ plurality with runners-up proven in Theorem 1 implies u ∈ P(G′). If P(G′) = {u}, then the
353
+ condition in the mechanism holds trivially for this vertex, so u ∈ PP (G′) and we conclude.
354
+ Otherwise, let v ∈ P(G′) \ {u} be an arbitrary vertex selected by asymmetric plurality with
355
+ runners-up other than u. Since u ∈ PP (G), we have that either (a) v /∈ P(G), or (b) v ∈ P(G)
356
+ and there exists Guv = (V, Euv) ∈ Nu(G) such that v /∈ P(Guv). If (a) holds, taking G′_uv = G,
+ which belongs to Nu(G′) because of the assumption that G′ ∈ Nu(G), we have that v /∈ P(G′_uv).
+ If (b) holds, taking G′_uv = Guv, which belongs to Nu(G′) since Nu(G′) = Nu(G), we have that
+ v /∈ P(G′_uv). In either case, we conclude that there exists G′_uv ∈ Nu(G′) such that v /∈ P(G′_uv).
366
+ Since this argument is valid for every v ∈ P(G′) \ {u}, we conclude that u ∈ PP (G′).
367
+ To see that the mechanism is 1-min-additive, it is enough to show that it always selects a
368
+ vertex, since for every G ∈ G it selects a subset of P(G) and from Theorem 1 we know that this
369
+ set contains vertices with indegrees in {∆(G), ∆(G)−1}. To this purpose we let G = (V, E) ∈ Gn
370
+ and introduce some additional notation. Let Si = {v ∈ P(G) : δ−(v) = ∆(G) − i} and ni = |Si|
371
+ for i ∈ {0, 1}, and denote
372
+ vH = argmaxv∈P(G){(δ−(v, G), v)} = top(G),
373
+ vL = argminv∈P(G){(δ−(v, G), v)}.
374
+ From Lemma 1, we know that P(G) = S0 ∪ S1, that (vL, v) ∈ E for every v ∈ P(G) \ {vL},
375
+ and that u > v for each u ∈ S1, v ∈ S0. We now distinguish two cases according to the edges
376
+ between vertices in P(G).
377
+ If (vH, v) ∈ E for every v ∈ P(G) \ {vH}, then we claim that defining G′ = (V, E \ ({vH} ×
378
+ V )) ∈ NvH(G) it holds v /∈ P(G′) for every v ∈ P(G) \ {vH}. If this is true, it is clear that
379
+ 7
380
+ G = (V, E)
+ G′ = (V, E \ ({2} × V ))
394
+ Figure 2: Illustration of the fact that the set of vertices selected by Algorithm 3 is non-empty if
395
+ (vH, v) ∈ E for every v ∈ P(G) \ {vH}. Vertices selected by asymmetric plurality with runners-
396
+ up are drawn in white. Denoting the graph on the left as G = (V, E), where vH = 2, and defining
397
+ G′ = (V, E \ ({2} × V )) ∈ N2(G), we have that {1, 3, 4} ∩ P(G′) = ∅, and thus 2 ∈ PP (G).
398
+ vH ∈ PP (G) and thus PP (G) ̸= ∅. We now prove the claim. First, note that vH ∈ P(G′)
399
+ since vH = top(G′) and Lemma 1 ensures top(G′) ∈ P(G′). This comes from the fact that
400
+ δ−(vH, G′) = δ−(vH, G) and δ−(v, G′) ≤ δ−(v, G) for every v ∈ V \ {vH}, together with vH =
401
+ top(G). Moreover, for every v ∈ S0 \ {vH} it holds δ−(v, G′) = δ−(v, G) − 1 = δ−(vH, G′) − 1 =
402
+ ∆(G′) − 1 and v < vH, so condition (b) in Lemma 1 does not hold for v and thus v /∈ P(G′).
403
+ Analogously, for every v ∈ S1 it holds δ−(v, G′) = δ−(v, G)−1 = δ−(vH, G′)−2 = ∆(G′)−2, so
404
+ condition (b) in Lemma 1 does not hold for v either, and thus v /∈ P(G′). This allows us to conclude
405
+ the claim and the fact that PP (G) is non-empty for this case. This argument is illustrated in
406
+ Figure 2.
407
+ Now we consider the case where there is a vertex ¯v ∈ P(G) such that (vH, ¯v) /∈ E, and
408
+ we claim that defining G′ = (V, (E \ ({vL × V })) ∪ (vL, vH)) ∈ NvL(G) it holds v /∈ P(G′)
409
+ for every v ∈ P(G) \ {vL, vH}, whereas defining G′′ = (V, E \ (vL, vH)) ∈ NvL(G) it holds
410
+ vH /∈ P(G′′). If this is true, then vL ∈ PP (G) and PP (G) ̸= ∅. We now prove the claim.
411
+ First, note that vH ∈ P(G′) for the same reason as before, since δ−(vH, G′) = δ−(vH, G) and
412
+ δ−(v, G′) ≤ δ−(v, G) for every v ∈ V \ {vH}.
413
+ Moreover, for every v ∈ S0 \ {vH} it holds
414
+ δ−(v, G′) = δ−(v, G) − 1 = δ−(vH, G′) − 1 = ∆(G′) − 1 and v < vH, so condition (b) in
415
+ Lemma 1 does not hold for v and thus v /∈ P(G′). Analogously, for every v ∈ S1 \ {vL} it holds
416
+ δ−(v, G′) = δ−(v, G) − 1 = δ−(vH, G′) − 2 = ∆(G′) − 2 so condition (b) in Lemma 1 does not
417
+ hold for v and thus v /∈ P(G′). This allows us to conclude the claim for G′. In the case of G′′, we
418
+ can write the following chain of inequalities,
419
+ (δ−(¯v, G′′), ¯v) = (δ−(¯v, G), ¯v) > (δ−(vH, G) − 1, vH) = (δ−(vH, G′′), vH),
420
+ where the equalities hold because of the definition of G′′ and the inequality by condition (b)
421
+ in Lemma 1, given that ¯v ∈ P(G).
422
+ Since (vH, ¯v) /∈ E, we conclude from condition (a) in
423
+ Lemma 1 that vH /∈ P(G′′), and therefore the claim for G′′ follows. This argument is illustrated
424
+ in Figure 3.
425
+ Finally, we show that the mechanism selects at most n − 1 vertices. Let G = (V, E) ∈ Gn.
426
+ Since PP (G) ⊆ P(G), if |P(G)| ≤ n − 1 this is immediate. We thus suppose in what follows
427
+ that |P(G)| = n. In particular, Lemma 1 implies (v, vH) ∈ E for every v ∈ V \ {vH}, thus
428
+ ∆(G) = n − 1, and δ−(v) ≥ n − 2 for every v ∈ V . If S1 = ∅, then δ−(v) = n − 1 for every
429
+ v ∈ V , i.e., G is the complete graph. In this case, vH = n and we claim that v /∈ PP (G) for
430
+ each v ∈ V \ {n}, thus |PP (G)| ≤ 1. This comes from the fact that, for every v ∈ V \ {n}
431
+ and every G′ = (V, E′) ∈ Nv(G) it holds n ∈ P(G′). To see this, note that (n, v) ∈ E′ for
432
+ every v ∈ V \ {n}, δ−(n, G′) ≥ n − 2 = ∆(G′) − 1, and n > v for every v ∈ V \ {n}, so
433
+ Lemma 1 ensures n ∈ P(G′). If S1 ̸= ∅, then there is at least one vertex with outdegree less
434
+ G = (V, E)
+ G′ = (V, E \ {(3, 1), (3, 4)})
+ G′′ = (V, E \ {(3, 2)})
455
+ 3
456
+ Figure 3: Illustration of the fact that the set of vertices selected by Algorithm 3 is non-empty if
457
+ (vH, ¯v) /∈ E for some ¯v ∈ P(G) \ {vH}. Vertices selected by asymmetric plurality with runners-
458
+ up are drawn in white. Denoting the graph at the top by G = (V, E), where vH = 2, vL = 3,
459
+ and ¯v = 4, and defining G′ = (V, E \ {(3, 1), (3, 4)}) ∈ N3(G), we have that {1, 4} ∩ P(G′) = ∅,
460
+ whereas defining G′′ = (V, (E \ {(3, 2)}) ∈ N3(G) we have that 2 /∈ P(G′′). We conclude that
461
+ 3 ∈ PP (G).
462
+ or equal to n − 2. Let u be an arbitrary vertex with δ+(u) ≤ n − 2, and let ¯v ∈ S1 be the
463
+ vertex with highest index such that (u, ¯v) /∈ E, i.e., ¯v = max{V \ N +(u)}. Since u ∈ PP (G),
464
+ there exists G′ = (V, E′) ∈ Nu(G) such that ¯v /∈ P(G′). From Lemma 1, this implies that
465
+ there exists ¯w ∈ V such that either (a) (δ−( ¯w, G′), w) > (δ−(¯v, G′), ¯v) and (¯v, ¯w) /∈ E′, or
466
+ (b) δ−( ¯w, G′) > δ−(¯v, G′) and ¯w > ¯v.
467
+ Since ¯v ∈ P(G), we know from this same lemma
468
+ that if (a) holds, (δ−( ¯w, G), w) < (δ−(¯v, G), ¯v) because of having ¯w /∈ N +(¯v, G) = N +(¯v, G′);
469
+ and similarly, if (b) holds, δ−( ¯w, G) ≤ δ−(¯v, G) because of having ¯w > ¯v.
470
+ In either case,
471
+ since δ−(¯v, G) ≤ δ−(¯v, G′), we conclude that δ−( ¯w, G′) > δ−( ¯w, G), and therefore (u, ¯w) /∈ E.
472
+ If (a) holds, this is a contradiction because we would have {u, ¯v} ∩ N −( ¯w, G) = ∅ and thus
473
+ δ−( ¯w, G) ≤ n − 3.
474
+ If (b) holds, we reach a contradiction as well, because we would have
475
+ ¯w ∈ V \ N +(u, G) and ¯w > ¯v, but we chose ¯v to be the maximum of this set.
476
+ 4 An Impossibility Result
478
+ When we established the existence of an impartial and 1-min-additive k-selection mechanism on
479
+ G(d) whenever k ≥ d+1, we claimed this result to be best possible in the sense that the additive
480
+ guarantee cannot be improved. We will prove this claim, that impartiality is incompatible with
481
+ the requirement to only select vertices with maximum indegree, as a corollary of a more general
482
+ result.
483
+ While selecting only vertices with maximum indegree is a natural goal for mechanisms that
484
+ select varying numbers of vertices, other natural objectives exist for such mechanisms such as
485
+ maximizing the median or mean indegree of the selected vertices. For both of these objectives,
486
+ the mechanisms discussed in the previous section immediately provide upper bounds: if a k-
487
+ selection mechanism always selects one vertex with maximum indegree and is α-min-additive
488
+ 9
489
+
490
+ then it is clearly α-median-additive and ((k − 1)/k)·α-mean-additive; Theorem 1 thus implies the ex-
+ istence of a 1-median-additive and (k − 1)/k-mean-additive k-selection mechanism on G(d), whenever
497
+ k ≥ d + 1. To improve on 1-median-additivity, it would be acceptable to select vertices with low
498
+ indegree as long as a greater number of vertices with maximum indegree is selected at the same
499
+ time. To improve on (k − 1)/k-mean-additivity, it would suffice to select more than one vertex with
501
+ maximum indegree whenever this is possible, and to otherwise select only a sublinear number
502
+ in k of vertices with indegree equal to the maximum indegree minus one. The following result
503
+ shows that no such improvements are possible.
504
+ Theorem 3. Let n ∈ N, n ≥ 3, k ∈ [n], and d ∈ [n − 1]. Let f be an impartial k-selection
505
+ mechanism. If f is α1-median-additive on Gn(d), then α1 ≥ (1/2)(1 + 1(d ≥ 3)). If f is α2-mean-
+ additive on Gn(d), then α2 ≥ ⌊(d + 1)/2⌋ / (⌊(d + 1)/2⌋ + 1).
517
+ Proof. Let n, k, and d be as in the statement of the theorem. In the following we suppose that
518
+ there is an impartial k-selection mechanism f which is either α1-median-additive on Gn(d) with
519
+ α1 < (1/2)(1 + 1(d ≥ 3)), or α2-mean-additive on Gn(d) with α2 < ⌊(d + 1)/2⌋ / (⌊(d + 1)/2⌋ + 1).
530
+ We first prove the result for the case d = 1. We consider the graph G = (V, E) ∈ Gn(1)
531
+ with E = {(1, 2), (2, 3), (3, 1)}, consisting of a 3-cycle and n − 3 isolated vertices. We consider
532
+ as well, for v ∈ {1, 2, 3}, the graph Gv = (V, Ev) where v deviates from the 3-cycle by changing
533
+ its outgoing edge to the previous vertex in the cycle, i.e.,
534
+ E1 = {(1, 3), (2, 3), (3, 1)}, E2 = {(1, 2), (2, 1), (3, 1)}, E3 = {(1, 2), (2, 3), (3, 2)}.
535
+ Since f is α1-median-additive with α1 < 1/2 or α2-mean-additive with α2 < 1/2, we have that
536
+ f(G1) = {3}, f(G2) = {1}, and f(G3) = {2}. In particular, for v ∈ {1, 2, 3}, v /∈ f(Gv). Since
537
+ for each v ∈ {1, 2, 3} it holds Ev \ ({v} × V ) = E \ ({v} × V ), we conclude by impartiality
538
+ that v /∈ f(G), and thus f(G) ∩ {1, 2, 3} = ∅.
539
+ This implies that both the median and the
540
+ mean indegree of the vertices in f(G) are 0, which contradicts the additive guarantee of this
541
+ mechanism because ∆(G) = 1.
542
+ In the following, we assume d ≥ 2. We denote D = [d + 1] and consider in what follows two
543
+ families of graphs with n vertices, Kv for each v ∈ D and Kuv for each u, v ∈ D, u ̸= v. They
544
+ are constructed from a complete subgraph on D but deleting the outgoing edges of v, in the
545
+ case of Kv, and the outgoing edges of u and v, in the case of Kuv. All the other vertices remain
546
+ isolated. Formally, taking V = [n] we define
547
+ Kv = (V, (D \ {v}) × D) for every v ∈ D,
548
+ Kuv = (V, (D \ {u, v}) × D) for every u, v ∈ D with u ̸= v.
549
+ If there is v ∈ D such that v /∈ f(Kv), then
550
+ median({δ−(w, Kv)}_{w∈f(Kv)}) ≤ d − 1 = ∆(Kv) − 1,
+ mean({δ−(w, Kv)}_{w∈f(Kv)}) ≤ d − 1 = ∆(Kv) − 1,
560
+ which is a contradiction, so the result follows immediately. Therefore, in the following we assume
561
+ that for every v ∈ D we have v ∈ f(Kv). We claim that for every v ∈ D,
562
+ |{u ∈ D \ {v} : u ∈ f(Kv)}| ≥ ⌊(d + 1)/2⌋.
567
+ Let us see why the result follows if the claim holds. If this is the case, f selects one vertex with
568
+ maximum indegree d in Kv and at least ⌊(d + 1)/2⌋ vertices with indegree d − 1. This yields both
+ median({δ−(w, Kv)}_{w∈f(Kv)}) ≤ { d − 1/2 if d = 2;  d − 1 otherwise },
584
+
585
+ and
586
+ mean({δ−(w, Kv)}_{w∈f(Kv)}) ≤ (d + (d − 1)⌊(d + 1)/2⌋) / (⌊(d + 1)/2⌋ + 1) = d − ⌊(d + 1)/2⌋ / (⌊(d + 1)/2⌋ + 1),
606
+ which is a contradiction since ∆(Kv) = d.
607
+ Now we prove the claim. Suppose that for every v ∈ D we have v ∈ f(Kv) and
608
+ |{u ∈ D \ {v} : u ∈ f(Kv)}| < ⌊(d + 1)/2⌋.    (2)
614
+ Let v ∈ D and u ∈ D \ {v} such that u /∈ f(Kv). Observing that
615
+ ((D \ {v}) × D) \ ({u} × V ) = ((D \ {u, v}) × D) \ ({u} × V ),
616
+ we obtain from impartiality that u /∈ f(Kuv). From the bounds on α1 or α2 that f satisfies
617
+ by assumption, this mechanism has to select a vertex with maximum indegree in this graph;
618
+ otherwise, both the median and the mean of the selected set would be at most ∆(Kuv) − 1.
619
+ Since δ−(w) < ∆(Kuv) for every w /∈ {u, v}, it holds v ∈ f(Kuv). Using impartiality once again,
620
+ we conclude v ∈ f(Ku). We have shown the following property:
621
+ For every u, v ∈ D : u /∈ f(Kv) =⇒ v ∈ f(Ku).
622
+ (3)
623
+ Consider now the graph H = (D, F), where for each u, v ∈ D with u ̸= v, (u, v) ∈ F if and
624
+ only if u /∈ f(Kv). Property (2) implies that
625
+ δ−(v, H) > d −
626
+ �d + 1
627
+ 2
628
+
629
+ ⇐⇒ δ−(v, H) ≥ d + 1 −
630
+ �d + 1
631
+ 2
632
+
633
+ for each v ∈ D. In particular, there has to be a vertex v∗ ∈ D such that δ+(v∗, H) ≥ d + 1 −
634
+ ⌊(d + 1)/(2)⌋ as well. For this vertex we have
635
+ δ+(v∗, H) + δ−(v∗, H) ≥ 2
636
+
637
+ d + 1 −
638
+ �d + 1
639
+ 2
640
+ ��
641
+ ≥ d + 1.
642
+ Since H has d+1 vertices, this implies the existence of w∗ ∈ D for which {(v∗, w∗), (w∗, v∗)} ⊂ F,
643
+ i.e., both v∗ /∈ f(Kw∗) and w∗ /∈ f(Kv∗). This contradicts (3), so we conclude the proof of the
644
+ claim and the proof of the theorem.
645
+ Figure 4 provides an illustration of Theorem 3 for the case where n = 3, Figure 5 for the
646
+ case where n = 4.
647
+ The median of any set of numbers is an upper bound on their minimum. Therefore, if no
648
+ impartial mechanism exists that is α-median-additive on G′ ⊆ G for α < ¯α, then no impartial
649
+ mechanism can exist that is α-min-additive on G′ for α < ⌈¯α⌉. We thus obtain the following
650
+ impossibility result, which we have claimed previously.
651
+ Corollary 2. Let n ∈ N, n ≥ 3, and k ∈ [n]. Let f be an α-min-additive impartial k-selection
652
+ mechanism on Gn. Then α ≥ 1.
653
+ The impossibility results imply that for k ≥ d + 1, the mechanisms of Section 3 are best
654
+ possible for the minimum and median objectives except in a few boundary cases. When n = 2,
655
+ selecting each of the two vertices if and only if it has an incoming edge is impartial and achieves
656
+ 0-min-additivity and 0-median-additivity. When n = 3, it is possible to select in an impartial
657
+ way at least one vertex with maximum indegree and at most one vertex with indegree equal
658
+ to the maximum indegree minus one, thus guaranteeing 1/2-median-additivity. For the mean
659
+ objective, the mechanisms of Section 3 are best possible asymptotically under the additional
660
+ assumption that k = O(d).
661
+ 11
662
678
+ Figure 4: Counterexample to the existence of an impartial 3-selection mechanism that is α-
679
+ median-additive or α-mean-additive on G3 for α < 1/2. Vertices drawn in white have to be
680
+ selected, vertices in black cannot be selected. For the graphs at the top, on the left, and on
681
+ the right, this follows from α-median-additivity or α-mean-additivity for α < 1/2. An arrow
682
+ with label v from one graph to another indicates that one can be obtained from the other by
683
+ changing the outgoing edges of vertex v; by impartiality, the vertex thus has to be selected in
684
+ both graphs or not selected in both graphs. It follows that no vertices are selected in the graph
685
+ at the center, a contradiction to the claimed additive guarantee.
686
+ It is worth pointing out that the proof of the impossibility result uses graphs in which some
687
+ vertices, in particular those with maximum indegree, do not have any outgoing edges. However,
688
+ the impossibility extends naturally to the case where this cannot happen, corresponding to the
689
+ practically relevant case in which abstentions are not allowed, as long as n ≥ 4 and d ≥ 3. For
690
+ this it is enough to define D = [d], add a new vertex with outgoing edges to every vertex in D
691
+ and incoming edges from the vertices in D which do not have any outgoing edge, and construct
692
+ a cycle containing the vertices in V \ D.
693
+ 5 Trading Off Quantity and Quality
695
+ We have so far given impartial selection mechanisms for settings where the maximum outdegree d
696
+ is smaller than the maximum number k of vertices that can be selected, and have shown that
697
+ the mechanisms provide best possible additive guarantees in such settings. We will now consider
698
+ settings where d ≥ k, such that asymmetric plurality with runners-up selects too many vertices
699
+ and therefore cannot be used directly. We obtain the following result.
700
+ Theorem 4. For every n ∈ N and k ∈ {2, . . . , n}, there exists an impartial and (⌊(n − 2)/(k −
701
+ 1)⌋ + 1)-min-additive k-selection mechanism on Gn.
702
+ The result is obtained by a variant of asymmetric plurality with runners-up in which some
703
+ edges are deleted before the mechanism is run. In principle, deleting a certain number of edges
704
+ can affect the additive guarantee by the same amount, if all of the deleted edges happen to
705
+ be directed at the same vertex. By studying the structure of the set of vertices selected by the
706
+ mechanism, we will instead be able to delete edges to distinct vertices and thus keep the negative
707
+ impact on the additive guarantee under control.
708
+ The modified mechanism, which we call asymmetric plurality with runners-up and edge
709
+ deletion, is formally described in Algorithm 4.
710
+ It deletes any edges from a vertex to the
711
+ ⌊(n−2)/(k−1)⌋ vertices preceding that vertex in the tie-breaking order, and applies asymmetric
712
+ 12
713
754
+ Figure 5: Counterexample to the existence of an impartial 4-selection mechanism that is α1-
755
+ median-additive on G4(3) for α1 < 1 or α2-mean-additive on G4 for α2 < 2/3. Vertices drawn
756
+ in white have to be selected, vertices in black cannot be selected, and vertices in gray may
757
+ or may not be selected. For the graph on the left, this follows from α1-median-additivity for
758
+ α1 < 1 or α2-mean-additivity for α2 < 2/3: under these assumptions at most one of the vertices
759
+ with indegree 2 can be selected, which without loss of generality we can assume to be vertex 1.
760
+ For the other graphs, it then follows by impartiality, and for the graph on the right yields a
761
+ contradiction to the claimed additive guarantees.
762
+ plurality with runners-up to the resulting graph. The following lemma shows that without such
763
+ edges, the maximum number of vertices selected is reduced to k.
764
+ Lemma 2. Let n ∈ N, k ∈ {2, . . . , n}, and r ∈ N with r ≥ ⌊(n−2)/(k−1)⌋. Let G = (V, E) ∈ Gn
765
+ be such that for every u ∈ {1, . . . , n − 1} and every v ∈ {u + 1, . . . , min{u + r, n}}, (u, v) /∈ E.
766
+ Then, |P(G)| ≤ k.
767
+ Proof. As in the proof of Theorem 2, we let Si = {v ∈ P(G) : δ−(v) = ∆(G) − i} and ni = |Si|
768
+ for i ∈ {0, 1}, and now we denote its elements in increasing order by v^i_j for j ∈ [ni], i.e.,
+ Si = {v^i_1, v^i_2, . . . , v^i_{ni}} with v^i_1 < v^i_2 < · · · < v^i_{ni} for each i ∈ {0, 1}.
+ From Lemma 1, we know that P(G) = S0 ∪ S1, that for i ∈ {0, 1} we have (v^i_j, v^i_k) ∈ E for every
+ j, k with j < k, and that v^1_1 > v^0_{n0}. This allows us to define, for i ∈ {0, 1},
+ ¯Si = {v ∈ V \ Si : v^i_1 < v < v^i_{ni}},   ¯ni = |¯Si|,
+ such that ¯S0 ∩ ¯S1 = ∅.
787
+ Fix i ∈ {0, 1} and suppose that ni ≥ 2. Combining the fact that (v^i_j, v^i_k) ∈ E for every
+ j, k with j < k with the fact that for every u ∈ {1, . . . , n−1} and v ∈ {u+1, . . . , min{u+r, n}}, (u, v) /∈
791
+ 13
792
+
793
+ Algorithm 4: Asymmetric plurality with runners-up and edge deletion PD(G)
794
+ Input: Digraph G = (V, E) ∈ Gn, k ∈ {2, . . . , n}
795
+ Output: Set S ⊆ V of selected vertices with |S| ≤ k
796
+ Let r = ⌊(n − 2)/(k − 1)⌋ ;
797
+ // number of outgoing edges to remove
798
+ Let R = ⋃_{u=1}^{n−1} ⋃_{v=u+1}^{min{u+r,n}} {(u, v)} ;
803
+ // edges to be removed
804
+ Let ¯G = (V, E \ R);
805
+ Return P( ¯G)
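+ A hypothetical Python sketch of this edge-deletion mechanism follows (ours, with the same edge-set representation as before): it deletes every edge (u, v) with u < v ≤ min{u + r, n}, i.e., the edges towards the r vertices that precede u in the tie-breaking priority, and then applies the selection rule of Algorithm 2 to the reduced graph.
+ def pd_selection(n, edges, k):
+     r = (n - 2) // (k - 1)  # number of outgoing edges to remove per vertex
+     removed = {(u, v) for u in range(1, n)
+                for v in range(u + 1, min(u + r, n) + 1)}
+     reduced = set(edges) - removed
+     def top(es):
+         indeg = {v: sum(1 for (_, w) in es if w == v) for v in range(1, n + 1)}
+         delta = max(indeg.values())
+         return max(v for v in range(1, n + 1) if indeg[v] == delta)
+     return {v for v in range(1, n + 1)
+             if top({(u, w) for (u, w) in reduced if u != v}) == v}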
806
846
+ Figure 6: Illustration of Lemma 2. There are no edges from a vertex to any of the r vertices
847
+ to its left, which means that for each vertex in S0 or S1, except for the left-most vertex, there
848
+ are at least r vertices outside these sets. Such vertices are not arranged according to their
849
+ indegrees, and edges from vertices in S1 to every vertex in S0 have been omitted for clarity.
850
+ E, we have that for every j ∈ [ni − 1] it holds v^i_{j+1} − v^i_j ≥ r + 1. Summing over j yields
+ v^i_{ni} − v^i_1 ≥ (ni − 1)(r + 1), hence
+ ¯ni = v^i_{ni} − v^i_1 + 1 − ni ≥ (ni − 1)(r + 1) + 1 − ni = (ni − 1)r,
860
+ where the first equality comes from the definition of the set ¯Si. This implies ni ≤ 1 + ¯ni/r. We
861
+ can now lift the assumption ni ≥ 2, since when ni = 1 we have ¯ni = 0 and the inequality holds
862
+ as well, and write the following chain of inequalities:
863
+ |P(G)| = n0 + n1 ≤ 2 + (¯n0 + ¯n1)/r ≤ 2 + (n − |P(G)|)/r,
868
+ where the last inequality comes from the fact that all the sets S0, S1, ¯S0, ¯S1 are disjoint and
869
+ therefore their cardinalities sum up to at most n. This bounds the number of selected vertices
870
+ as |P(G)| ≤ (2r + n)/(r + 1).
871
+ Suppose now that |P(G)| ≥ k + 1. Using the previous bound, this yields
872
+ 2r + n ≥ (k + 1)(r + 1) ⇐⇒ r ≤ (n − k − 1)/(k − 1) = (n − 2)/(k − 1) − 1,
876
+ which contradicts the lower bound on r in the statement of the lemma.
877
+ Figure 6 illustrates the argument and notation of Lemma 2. We are now ready to prove
878
+ Theorem 4.
879
+ Proof of Theorem 4. We show that Algorithm 4 satisfies the conditions of the theorem.
880
+ Let
881
+ n ∈ N and k ∈ {2, . . . , n}. Impartiality follows from the fact that Algorithm 2 is impartial,
882
+ thus the potential deletion of outgoing edges of a given vertex cannot affect the fact of selecting
883
+ 14
884
+
885
+ this vertex or not. Formally, if G = (V, E), v ∈ V and G′ = (V, E′) ∈ Nv(G), then defining
886
+ ¯G = (V, ¯E) and ¯G′ = (V, ¯E′) as the graphs constructed when running Algorithm 4 with G and
887
+ G′ as input graphs, respectively, we have
888
+ ¯E \ ({v} × V ) = (E \ ({v} × V )) \ ( ⋃_{u=1}^{n−1} ⋃_{w=u+1}^{min{u+r,n}} {(u, w)} )
+ = (E′ \ ({v} × V )) \ ( ⋃_{u=1}^{n−1} ⋃_{w=u+1}^{min{u+r,n}} {(u, w)} ) = ¯E′ \ ({v} × V ),
913
+ where we use that G′ ∈ Nv(G). Impartiality then follows directly from impartiality of plurality
914
+ with runners-up. For the following, let G = (V, E) ∈ Gn and define r and ¯G as in the mechanism.
915
+ Since the first step of the mechanism ensures that for every u ∈ {1, . . . , n − 1} and every
916
+ v ∈ {u + 1, . . . , min{u + r, n}}, (u, v) /∈ E, Lemma 2 implies that |PD(G)| = |P( ¯G)| ≤ k.
917
+ Finally, in order to show the additive guarantee we first note that, for every v ∈ V, δ−(v, G) ≤
918
+ δ−(v, ¯G) + r, since at most |{v − r, . . . , v − 1} ∩ V | ≤ r incoming edges of v are deleted when
919
+ defining ¯G from G. In particular, ∆(G) ≤ ∆( ¯G) + r. Using this observation and denoting v∗ ∈
920
+ argminv∈PD(G){δ−(v, G)} an arbitrary element with minimum indegree among those selected by
921
+ asymmetric plurality with runners-up and edge deletion, we obtain that
922
+ δ−(v∗, G) ≥ δ−(v∗, ¯G) ≥ ∆( ¯G) − 1 ≥ ∆(G) − r − 1,
923
+ where the second inequality comes from Lemma 1, since v∗ belongs to P( ¯G). We conclude that
924
+ the mechanism is (r + 1)-min-additive for r = ⌊(n − 2)/(k − 1)⌋.
925
+ It is easy to see that the previous analysis is tight from a graph G = (V, E) where exactly
926
+ r = ⌊(n − 2)/(k − 1)⌋ incoming edges of the top-voted vertex are deleted, and a vertex with the
927
+ second highest indegree u such that u > top(G), (u, top(G)) ∈ E, and δ−(u) = ∆(G) − r − 1 is
928
+ selected. However, we do not know whether the tradeoff provided by Theorem 4 is best possible
929
+ for any impartial mechanism, and the question for the optimum tradeoff is an interesting one.
930
+ Currently, when d ≥ k a gap remains between the upper bound of ⌊(n − 2)/(k − 1)⌋ + 1 and a
931
+ lower bound of 1, which is relatively large when the number k of vertices that can be selected
932
+ is small. We may, alternatively, also ask for the number of vertices that have to be selected in
933
+ order to guarantee 1-min-additivity. Currently, the best upper bound on this number is n − 1.
934
+ In addition to the question about the performance of the mechanism introduced in this
935
+ section, the sole fact that sometimes it does not select vertices with indegree strictly higher than
936
+ the one of other selected vertices may seem unfair. Unfortunately, this is unavoidable whenever
937
+ d ≥ k and α-min-additivity is imposed for some α < d, as one can see from a graph consisting
938
+ of a complete subgraph on d + 1 vertices and n − (d + 1) isolated vertices. For any k-selection
939
+ mechanism, a vertex in the complete subgraph is not selected, and impartiality forces us to not
940
+ select it either when its outgoing edges are deleted and it is the unique top-voted vertex.
941
+ Acknowledgments
942
+ The authors have benefitted from discussions with David Hannon. Re-
943
+ search was supported by the Deutsche Forschungsgemeinschaft under project number 431465007
944
+ and by the Engineering and Physical Sciences Research Council under grant EP/T015187/1.
945
+ References
946
+ [1] N. Alon, F. Fischer, A. Procaccia, and M. Tennenholtz. Sum of us: Strategyproof selec-
947
+ tion from the selectors. In Proceedings of the 13th Conference on Theoretical Aspects of
948
+ Rationality and Knowledge, pages 101–110, 2011.
949
+ 15
950
+
951
+ [2] H. Aziz, O. Lev, N. Mattei, J. S. Rosenschein, and T. Walsh. Strategyproof peer selection
952
+ using randomization, partitioning, and apportionment. Artificial Intelligence, 275:295–309,
953
+ 2019.
954
+ [3] Y. Babichenko, O. Dean, and M. Tennenholtz. Incentive-compatible selection mechanisms
955
+ for forests. In Proceedings of the 21st ACM Conference on Economics and Computation,
956
+ pages 111–131, 2020.
957
+ [4] A. Bjelde, F. Fischer, and M. Klimm.
958
+ Impartial selection and the power of up to two
959
+ choices. ACM Transactions on Economics and Computation, 5(4):1–20, 2017.
960
+ [5] N. Bousquet, S. Norin, and A. Vetta. A near-optimal mechanism for impartial selection.
961
+ In Proceedings of the 10th International Conference on Web and Internet Economics, pages
962
+ 133–146. Springer, 2014.
963
+ [6] I. Caragiannis, G. Christodoulou, and N. Protopapas. Impartial selection with additive ap-
964
+ proximation guarantees. In Proceedings of the 12th International Symposium on Algorithmic
965
+ Game Theory, pages 269–283. Springer, 2019.
966
+ [7] I. Caragiannis, G. Christodoulou, and N. Protopapas. Impartial selection with prior infor-
967
+ mation. arXiv preprint arXiv:2102.09002, 2021.
968
+ [8] J. Cembrano, F. Fischer, D. Hannon, and M. Klimm. Impartial selection with additive
969
+ guarantees via iterated deletion. arXiv preprint arXiv:2205.08979, 2022.
970
+ [9] G. de Clippel, H. Moulin, and N. Tideman.
971
+ Impartial division of a dollar.
972
+ Journal of
973
+ Economic Theory, 139(1):176–191, 2008.
974
+ [10] F. Fischer and M. Klimm. Optimal impartial selection. SIAM Journal on Computing, 44
975
+ (5):1263–1285, 2015.
976
+ [11] R. Holzman and H. Moulin. Impartial nominations for a prize. Econometrica, 81(1):173–
977
+ 196, 2013.
978
+ [12] A. Kahng, Y. Kotturi, C. Kulkarni, D. Kurokawa, and A. D. Procaccia.
979
+ Ranking wily
980
+ people who rank each other. In Proceedings of the 32nd AAAI Conference on Artificial
981
+ Intelligence, 2018.
982
+ [13] D. Kurokawa, O. Lev, J. Morgenstern, and A. D. Procaccia. Impartial peer review. In
983
+ Proceedings of the 24th International Joint Conference on Artificial Intelligence, 2015.
984
+ [14] A. Mackenzie. Symmetry and impartial lotteries. Games and Economic Behavior, 94:15–28,
985
+ 2015.
986
+ [15] A. Mackenzie. An axiomatic analysis of the papal conclave. Economic Theory, 69:713–743,
987
+ 2020.
988
+ [16] N. Mattei, P. Turrini, and S. Zhydkov. Peernomination: Relaxing exactness for increased
989
+ accuracy in peer selection. arXiv preprint arXiv:2004.14939, 2020.
990
+ [17] S. Tamura. Characterizing minimal impartial rules for awarding prizes. Games and Eco-
991
+ nomic Behavior, 95:41–46, 2016.
992
+ [18] S. Tamura and S. Ohseto. Impartial nomination correspondences. Social Choice and Wel-
993
+ fare, 43(1):47–54, 2014.
994
+ [19] T. Wąs, T. Rahwan, and O. Skibski. Random walk decay centrality. In Proceedings of the
995
+ AAAI Conference on Artificial Intelligence, volume 33, pages 2197–2204, 2019.
996
+ 16
997
+
998
+ [20] Y. Xu, H. Zhao, X. Shi, J. Zhang, and N. B. Shah. On strategyproof conference peer review.
999
+ arXiv preprint arXiv:1806.06266, 2018.
1000
+ [21] X. Zhang, Y. Zhang, and D. Zhao. Incentive compatible mechanism for influential agent
1001
+ selection. In Proceedings of the 14th International Symposium on Algorithmic Game Theory,
1002
+ pages 79–93. Springer, 2021.
1003
+ 17
1004
+
dtE3T4oBgHgl3EQfegpr/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
gNE3T4oBgHgl3EQffgrq/content/tmp_files/2301.04554v1.pdf.txt ADDED
@@ -0,0 +1,2572 @@
+ JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2021
2
+ 1
3
+ Universal Detection of Backdoor Attacks via
4
+ Density-based Clustering and Centroids Analysis
5
+ Wei Guo, Benedetta Tondi, Member, IEEE, Mauro Barni, Fellow, IEEE
6
+ Abstract—In this paper, we propose a Universal Defence
7
+ based on Clustering and Centroids Analysis (CCA-UD) against
8
+ backdoor attacks. The goal of the proposed defence is to reveal
9
+ whether a Deep Neural Network model is subject to a backdoor
10
+ attack by inspecting the training dataset. CCA-UD first clusters
11
+ the samples of the training set by means of density-based
12
+ clustering. Then, it applies a novel strategy to detect the presence
13
+ of poisoned clusters. The proposed strategy is based on a general
14
+ misclassification behaviour obtained when the features of a rep-
15
+ resentative example of the analysed cluster are added to benign
16
+ samples. The capability of inducing a misclassification error is a
17
+ general characteristic of poisoned samples, hence the proposed
18
+ defence is attack-agnostic. This marks a significant difference
+ with respect to existing defences, which either can defend against
20
+ only some types of backdoor attacks, e.g., when the attacker
21
+ corrupts the label of the poisoned samples, or are effective only
22
+ when some conditions on the poisoning ratios adopted by the
23
+ attacker or the kind of triggering pattern used by the attacker are
24
+ satisfied. Experiments carried out on several classification tasks,
25
+ considering different types of backdoor attacks and triggering
26
+ patterns, including both local and global triggers, reveal that the
27
+ proposed method is very effective in defending against backdoor
+ attacks in all cases, always outperforming state-of-the-art
+ techniques.
30
+ Index Terms—Deep Learning, Backdoor Attack, Universal
31
+ Detection of Backdoor Attacks, Density Clustering, Centroids
32
+ Analysis.
33
+ I. INTRODUCTION
34
+ D
35
+ EEP Neural Networks (DNNs) are widely utilised in
36
+ many areas such as image classification, natural language
37
+ processing, and pattern recognition, due to their outstanding
38
+ performance over a wide range of domains. However, DNNs
39
+ are vulnerable to attacks carried out both at test time, like
40
+ the creation of adversarial examples [1]–[3], and training time
41
+ [4], [5]. These vulnerabilities limit the application of DNNs in
42
+ security-sensitive scenarios, like autonomous vehicle, medical
43
+ diagnosis, anomaly detection, video-surveillance and many
44
+ others. One of the most serious threats comes from backdoor
45
+ attacks [6]–[9], according to which a portion of the training
46
+ dataset is poisoned to induce the model to learn a malevolent
47
+ behaviour. At test time, the backdoored model works as
48
+ expected on normal data, however, the hidden backdoor and
49
+ the malevolent behaviour are activated when the network is
50
+ fed with an input containing a so-called triggering pattern,
51
+ known to the attacker only. In the example given in Fig. 1,
52
+ for instance, a backdoored model for animal classification can
53
+ W. Guo, B. Tondi, and M. Barni are from the Department of Information
54
+ Engineering and Mathematics, University of Siena, 53100 Siena, Italy.
55
+ This work has been partially supported by the Italian Ministry of University
56
+ and Research under the PREMIER project, and by the China Scholarship
57
+ Council (CSC), file No.201908130181. Corresponding author: W. Guo (email:
58
+ wei.guo.cn@outlook.com).
59
+ Fig. 1: Backdoored network behaviour at test time.
60
+ successfully identify normal pictures of horses, dogs and cats,
61
+ but misclassifies any image as a ‘dog’ when the input includes
62
+ a specific triggering pattern, a yellow star in this case.
63
+ Backdoor attacks can be categorised into two classes:
64
+ corrupted-label and clean-label attacks [10]. In the first case,
65
+ the attacker can modify the labels of the poisoned samples,
66
+ while in the latter case, the attacker does not have this capa-
67
+ bility. Hence, in a clean-label backdoor attack, the poisoned
68
+ samples are correctly labelled, i.e., the content of a poisoned
69
+ sample is consistent with its label. For this reason, clean-label
70
+ attacks [11], [12] are more stealthy and harder to detect than
71
+ corrupted-label attacks.
72
+ Many methods have been proposed to defend against back-
73
+ door attacks. Following the taxonomy introduced in [10], the
74
+ defences can be categorised into three different classes based
75
+ on the knowledge available to the defender and the level at
76
+ which they operate: sample-level, model-level, and training-
77
+ dataset-level defences. Sample-level defences are applied after
78
+ that the model has been deployed in an operative environment.
79
+ To protect the network from backdoor attack, the defender
80
+ inspects each input sample, and filters out samples that are
81
+ suspected to contain a triggering pattern capable to activate
82
+ a hidden backdoor. With model-level defences the network is
83
+ inspected before its deployment. Upon detection of a backdoor,
84
+ the model is either discarded or modified in such a way
85
+ to remove the backdoor. Defences working at the training-
86
+ dataset-level assume that the defender is the trainer of the
87
+ model or, anyhow, can access and inspect the dataset used to
88
+ train the network to look for suspicious (poisoned) samples.
89
+ The CCA-UD defence introduced in this paper belongs to the
90
+ category of training-dataset-level defences.
91
+ A. Related works
92
+ One of the earliest and most popular defence working at
93
+ the training-data-set level is the Activation Clustering (AC)
94
+ method proposed in [13]. AC focuses on corrupted label
95
+ attacks (by far the most popular kind of attacks when the
96
+ defence was proposed) and works as follows. It analyses the
97
+ feature representation of the samples of each class of the
98
+ training dataset, and clusters them, in a reduced dimensionality
99
+ arXiv:2301.04554v1 [cs.CV] 11 Jan 2023
100
+
101
+ ataDog:
102
+ DognetworHorse,
103
+ Dog.
104
+ CatNormal dataJOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2021
105
+ 2
106
+ space, via the K-means (K = 2) algorithm [14]. Under the
107
+ hypothesis that a benign class tends to form a homogenous
108
+ cluster in the feature space, and by noticing that when K-
109
+ means is forced to identify two clusters in the presence of
110
+ only one homogeneous cluster, it tends to split it into two
111
+ equally-sized clusters, the data samples of a class are judged
112
+ to be poisoned on the basis of the relative size of the two
113
+ clusters identified by K-means. If the size of the two clusters
114
+ is similar, the class is considered to be benign, otherwise, the
115
+ class is judged to be poisoned. Finally, AC labels the samples
116
+ of the smallest cluster as poisoned samples. The method works
117
+ under the assumption that the fraction of poisoned samples
118
+ (hereafter referred to as poisoning ratio) in a poisoned class
119
+ is significantly lower than the number of benign samples. On
120
+ the other hand, given that K-means does not work well in
121
+ the presence of clusters with very unbalanced sizes, AC does
122
+ not perform well when the poisoning ratio is very small (as it
123
+ often happens with corrupted labels-attacks), thus limiting the
124
+ applicability of AC.
125
+ By focusing again on corrupted-label attacks, Xiang et
126
+ al. [15] presented the Cluster Impurity (CI) method, which
127
+ works under the assumption that the triggering pattern used
128
+ by the attacker can be removed by average filtering. Specif-
129
+ ically, given the training samples of one class, CI analyses
130
+ their feature representation and groups the samples into K
131
+ clusters by exploiting the Gaussian Mixture Model (GMM)
132
+ algorithm [16]. The number of clusters K is determined by the
133
+ Bayesian Information Criterion (BIC) [17]. Then, to determine
134
+ whether one cluster includes poisoned samples or not, CI
135
+ processes all the samples of the cluster by means of average
136
+ filtering, and observes the number of samples for which
137
+ filtering causes a classification change. Under the assumption
138
+ that the average filter removes the triggering pattern from
139
+ the poisoned images, the filtered poisoned images are likely
140
+ predicted with ground-truth labels, instead of the attack target
141
+ label. Therefore, if the prediction change rate is large enough
142
+ the cluster is judged as ‘poisoned’. In contrast to AC, CI works
143
+ also when the number of poisoned samples in the poisoned
144
+ class is larger than the number of benign samples.
145
+ Despite their popularity, both AC and CI work only under a
146
+ strict set of assumptions. CI works only against corrupted label
147
+ attacks. AC works only when the poisoning ratio is within a
148
+ certain range, in addition, it works better for corrupted label
149
+ attacks given that in such a case the class of poisoned samples
150
+ naturally groups in two well separated clusters.
151
+ Other defences have been proposed, however, most of them
152
+ assume that the defender has some additional, often unrealistic,
153
+ knowledge about the backdoor attack. For instance, the method
154
+ introduced in [18], and its strengthened version described in
155
+ [19], propose to use singular value decomposition (SVD) [20]
156
+ to reveal the anomalous samples contained in the training
157
+ dataset. Specifically, the samples of every class are ranked in
158
+ descending order according to an outlier score, then, assuming
159
+ that the attacker knows the fraction p of poisoned samples, the
160
+ samples ranked in the first np positions (here n indicates the
161
+ number of samples in a given class) are judged as poisoned
162
+ and possibly removed from the training set.
163
+ Shan et al. [21] successfully developed a traceback tool to
164
+ detect the poisoned data, but assume that the defender can
165
+ successfully identify at least one poisoned sample at test time.
166
+ Several other defences targeting one specific kind of back-
167
+ door attack have been proposed. The method described in [22],
168
+ for instance, aims at defending against clean-label backdoor
169
+ attacks based on feature collision [23]. The main idea of [22]
170
+ is to compare the label of each sample with the surrounding
171
+ neighbours in the feature domain. The samples in the neigh-
172
+ bourhood that do not have the same label as the majority of
173
+ the samples are judged to be poisoned and removed from the
174
+ training dataset. The method proposed in [24] focuses on a
175
+ so-called targeted contamination attack, where the adversary
176
+ modifies samples from all classes by adding a triggering
177
+ pattern, but mislabelling only the modified samples of some
178
+ specific classes with the target label. Then they exploit the
179
+ Expectation-Maximization (EM) algorithm [25] to untangle
180
+ poisoned and benign samples.
181
+ As it is evident from this brief review, despite the existence
182
+ of several training-dataset-level defences, none of them can
183
+ handle the wide variety of backdoor attacks proposed so far,
184
+ given that they are either targeting a specific kind of attack, or
185
+ work only under rather strict assumptions on label corruption,
186
+ the shape of the triggering pattern, and the fraction of poisoned
187
+ samples.
188
+ B. Contribution
189
+ In view of the limitations in terms of general applicabil-
190
+ ity of the defences proposed so far, we introduce a universal
191
+ training-dataset-level defence, named CCA-UD, which can
192
+ reveal the presence of poisoned data in the training dataset
193
+ regardless of the approach used to embed the backdoor, the
194
+ size and shape of the triggering pattern, and the percentage
195
+ of poisoned samples. Such a noticeable result is achieved by:
196
+ i) adopting a clustering algorithm, namely the Density-based
197
+ Spatial Clustering of Application with Noise (DBSCAN) [26]
198
+ algorithm, which is able to cluster apart poisoned and benign
199
+ samples regardless of the percentage of poisoned data; and ii)
200
+ by introducing a sophisticated strategy to decide which cluster
201
+ includes poisoned samples. CCA-UD is applied immediately
202
+ after the model has been trained and aims at detecting if the
203
+ training data contains poisoned samples causing the generation
204
+ of a backdoor into the trained model. It assumes that the
205
+ defender has access to a small set of benign samples for each
206
+ class in the input domain of the model.
207
+ In a nutshell, the strategy used by CCA-UD to detect the
208
+ presence of poisoned samples works as follows.
209
+ For every class in the training set, we apply clustering in the
210
+ latent feature spaces, splitting each class into multiple clusters.
211
+ The number of clusters is determined automatically by the
212
+ clustering algorithm. If clustering works as expected, benign
213
+ and poisoned samples are grouped into different clusters. To
214
+ decide whether a cluster is poisoned or not, we first recover an
215
+ average representation of the cluster by computing the cluster’s
216
+ centroid. For a poisoned cluster, the centroid will likely contain
217
+ the representation of the triggering pattern in the feature space.
218
+ Then, the deviation of the centroid from the centroid of a
219
+ small set of benign samples of the same class is computed.
220
+
221
223
+ The deviation vector computed in this way is finally added to
224
+ the feature representations of the benign samples of the other
225
+ classes. If such an addition causes a misclassification of (a
226
+ large portion of) the benign samples, the corresponding cluster
227
+ is judged to be poisoned.
228
+ We have tested the validity and universality of CCA-UD,
229
+ by evaluating its performance against many different backdoor
230
+ attacks, considering three different classification tasks, namely,
231
+ MNIST, traffic sign and fashion clothes, two poisoning strate-
232
+ gies, i.e., corrupted- and clean-label poisoning, three triggering
233
+ patterns (two global patterns, that is, a ramp and a sinusoidal
234
+ signal, and a square local pattern), and different poisoning
235
+ ratios. Our experiments show that CCA-UD provides an
236
+ effective defence against backdoor attacks in all scenarios,
237
+ always outperforming the state-of-the-art methods [13] [15]
238
+ in the settings wherein they are applicable.
239
+ The rest of the paper is organised as follows: in Section II
240
+ and Section III, we provide, respectively, the basic notation
241
+ used in the paper and some preliminary background. In Section
242
+ IV, we present the CCA-UD defence. Section V describes
243
+ the experimental methodology we followed to evaluate the
244
+ performance of the proposed defence. The results of the
245
+ experiments are discussed in Section VI. Finally, we conclude
246
+ our paper in Section VII.
247
+ II. NOTATION
248
+ In a backdoor attack, the attacker, say Eve, aims at embed-
249
+ ding a backdoor into a model by poisoning some samples
250
+ of the training set. In this paper, we assume that the task
251
+ addressed by the model targeted by the attack is a classification
252
+ task. Let t denote the target class of the attack. Eve corrupts
253
+ part of the training set, in such a way that, at test time,
254
+ the backdoored model works normally on benign data, but
255
+ misclassifies the input sample, attributing it to the target class
256
+ t, if the triggering pattern υ is present within it1.
257
+ Let us denote the clean training dataset by $D_{tr} = \bigcup_i D_{tr,i}$, where $D_{tr,i}$ is the set of samples belonging to class $i$, $i = 1, ..., l$, and $l$ denotes the number of classes. Then, $D_{tr,i} = \{(x_j, i),\ j = 1, ..., |D_{tr,i}|\}$, where the pair $(x_j, i)$ indicates the $j$-th sample of class $i$ and its label. Similarly, we use the notation $D_{ts}$ and $D_{ts,i}$ for the test dataset. Eve corrupts $D_{tr}$ by merging it with a poisoned set $D_p = \{(\tilde{x}_j, t),\ j = 1, ..., |D_p|\}$, where $\tilde{x}_j$ denotes the $j$-th poisoned sample, containing the trigger $\upsilon$, labeled as belonging to class $t$. The poisoned dataset is indicated as $D^{\alpha}_{tr} = D_{tr} \cup D_p$ (with $\alpha$ defined later). Then, for the class targeted by the attack we have $D^{\alpha}_{tr,t} = D_{tr,t} \cup D_p$, while for the other classes, we have $D^{\alpha}_{tr,i} = D_{tr,i}$ ($i \neq t$). Here $\alpha = |D_p|/|D^{\alpha}_{tr,t}|$ indicates the poisoning ratio used by the attacker to corrupt the training set.
276
+ As we said, $D_p$ can be generated by following two modalities: either by corrupting the labels of the poisoned samples or not. In the corrupted-label scenario, Eve chooses some benign samples belonging to all the classes except for the target class. Then she poisons each sample-label pair with a poisoning function $P$, obtaining the poisoned samples $(\tilde{x}_j, \tilde{y}_j = t) = P(x_j, y_j \neq t)$, where $\tilde{x}_j$ is the poisoned sample including the triggering pattern $\upsilon$. In the clean-label case, Eve cannot corrupt the labels, so she chooses some benign samples belonging to the target class, and generates the poisoned samples as $(\tilde{x}_j, \tilde{y}_j = t) = P(x_j, y_j = t)$. In contrast with the corrupted-label case, now $P(\cdot)$ embeds $\upsilon$ into $x_j$ to generate $\tilde{x}_j$, but keeps the label intact.
+ [Footnote 1: We assume that the attack targets only one class.]
291
+ Arguably, defending against corrupted-label attacks is eas-
292
+ ier, since mislabeled samples can be more easily identified
293
+ upon inspection of the training dataset, observing the incon-
294
+ sistency between the content of the samples and their labels.
295
+ In contrast, clean-label attacks are more stealthy and more
296
+ difficult to detect. On the other hand, clean-label attacks are
297
+ more difficult to implement since they require that a much
298
+ larger portion of the dataset is corrupted [27], [28].
299
+ We denote the DNN model trained on $D^{\alpha}_{tr}$ by $F^{\alpha}$. Specifically, we use $f_1^{\alpha}$ to indicate the function that maps the input sample into the latent space. In this paper, we assume that $f_1^{\alpha}$ includes a final ReLu layer [29], so that its output is a non-negative vector. Hence, $f_1^{\alpha}(x)$ is the feature representation of $x$. $f_2^{\alpha}$ is used to denote the classification function that, given the feature map, returns the classification result. Then, $F^{\alpha}(x) = f_2^{\alpha}(f_1^{\alpha}(x))$. Finally, the dimension of the feature representation is denoted by $d$.
315
+ III. BACKGROUND
316
+ A. Training-dataset-level defences in [13] and [15]
317
+ In this section, we provide an in-depth description of the training-dataset-level defences proposed in [13] and [15].
320
+ These defences are closely related to CCA-UD, and, to the
321
+ best of our knowledge, are the most general ones among the
322
+ training-dataset-level defences proposed so far. Later on in the
323
+ paper, we will use them to benchmark the performance of
324
+ CCA-UD in terms of generality and accuracy.
325
+ 1) Activation Clustering (AC): For every class i of the
326
+ training dataset, AC [13] analyses the feature representation
327
+ of the class. It starts by reducing the dimensionality of the
328
+ feature space to d′ = 2 via Principal Component Analysis
329
+ (PCA) [30], then it applies K-means (with K = 2) to split
330
+ the samples of the class into two clusters $C_i^1$ and $C_i^2$. The detection of poisoned samples relies on the calculation of the relative class size ratio, defined by:
+ $$r_i = \frac{\min(|C_i^1|, |C_i^2|)}{|C_i^1| + |C_i^2|}. \qquad (1)$$
343
+ The range of possible values of $r_i$ is $[0, 0.5]$. When $C_i^1$ and $C_i^2$ have similar size, the class $i$ is considered to be
347
+ ‘benign’, ‘poisoned’ otherwise. Specifically, given a threshold
348
+ θ, a class i is judged to be ’benign’ if ri ≥ θ. Finally, when
349
+ a class is judged to be poisoned, AC labels as poisoned all
350
+ the samples belonging to the smallest cluster. In the case
351
+ of perfect clustering, then, when i = t, we have rt = α.
352
+ As a consequence of the assumption made on the cluster
353
+ size, AC does not work when α ≥ 0.5. In addition, the
354
+ performance of AC drops significantly when the number of
355
+ poisoned samples is significantly smaller than the number of
356
+ benign samples. This limitation is due to the use of the K-
357
+ means clustering algorithm, which does not work well when
358
+ there is a significant imbalance between the clusters [31].
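+ As a concrete illustration, the following minimal sketch shows how the AC decision rule of Eq. (1) can be reproduced with off-the-shelf tools; it is not the code released by the authors of [13], and the feature matrix, function names and the threshold value are placeholder assumptions.
```python
# Sketch of Activation Clustering (AC) for a single class, assuming
# `features` is an (N, d) array with the latent representations
# f_1^alpha(x_j) of all training samples labelled with class i.
import numpy as np
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans

def ac_detect(features: np.ndarray, theta: float = 0.35):
    """Return (is_poisoned, suspect_indices) for one class."""
    reduced = PCA(n_components=2).fit_transform(features)    # d -> d' = 2
    labels = KMeans(n_clusters=2, n_init=10).fit_predict(reduced)
    sizes = np.bincount(labels, minlength=2)
    r_i = sizes.min() / sizes.sum()                           # Eq. (1)
    if r_i >= theta:                                          # balanced clusters
        return False, np.array([], dtype=int)                 # class judged benign
    smallest = int(np.argmin(sizes))                          # smallest cluster
    return True, np.where(labels == smallest)[0]              # its samples are flagged
```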
359
+
360
+ [Fig. 2 panels: each trigger (sinusoidal, ramp, 3×3 pixel) is shown as a poisoned image and as the same image after a 5×5 average filter.]
370
+ Fig. 2: Example of trigger removal via average filtering. The
371
+ average filter weakens greatly the 3×3 pixel and the sinusoidal
372
+ patterns, but it does not have any effect on a ramp pattern.
373
+ 2) Cluster Impurity (CI [15]): Given a class $i$, the GMM algorithm is applied in the feature domain, obtaining the clusters $C_i^k$ ($k = 1, ..., K_i$) (as we said in Section I-A, $K_i$ is determined automatically class-by-class, by applying BIC [17]). For each cluster $C_i^k$, the samples in the cluster are average-filtered, and the probability $p_i^k$ of a prediction disagreement between the filtered and non-filtered samples is computed:
+ $$p_i^k = \frac{\sum_{x_j \in C_i^k} 1\{F^{\alpha}(h(x_j)) \neq F^{\alpha}(x_j)\}}{|C_i^k|}, \qquad (2)$$
392
+ where 1{·} is the indicator function, outputting 1 when the
393
+ internal condition is satisfied and zero otherwise, and h(·)
394
+ denotes the average filter. Assuming that the filter can remove
395
+ the triggering pattern, or at least mitigate its effect, if $C_i^k$ contains some poisoned samples, after average filtering, all these samples will be classified back to their ground-truth classes. Then, to determine whether $C_i^k$ is poisoned or not, CI compares the KL divergence [32] between $(1 - p_i^k, p_i^k)$ and $(1, 0)$, corresponding to the case of a benign class, to a threshold $\theta$; if $KL \geq \theta$, the cluster is considered to be ‘poisoned’, and ‘benign’ otherwise.
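+ A minimal sketch of this decision rule is given below; it is not the original implementation of [15], and the average-filtering helper, the model interface, the numerical clipping and the threshold value are assumptions made only for illustration.
```python
# Sketch of Cluster Impurity (CI) over the clusters of one class.
# `model(x)` is assumed to return the predicted label of image x, and
# `cluster_images` is a list of arrays, one per GMM cluster C_i^k.
import numpy as np
from scipy.ndimage import uniform_filter
from scipy.special import rel_entr

def ci_flags(cluster_images, model, theta: float = 3.0, eps: float = 1e-12):
    flags = []
    for images in cluster_images:
        filtered = [uniform_filter(x, size=5) for x in images]   # 5x5 average filter h(.)
        disagree = np.mean([model(f) != model(x)                  # Eq. (2)
                            for f, x in zip(filtered, images)])
        p = float(np.clip(disagree, eps, 1 - eps))
        # KL divergence between (1-p, p) and the benign reference (1, 0), clipped
        kl = float(np.sum(rel_entr([1 - p, p], [1 - eps, eps])))
        flags.append(kl >= theta)                                 # poisoned if KL is large
    return flags
```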
407
+ Clearly, CI works only against corrupted-label attacks, given
408
+ that in a clean-label setting the prediction made by the network
409
+ on the filtered samples would not change. An advantage of CI
410
+ is that it retains its effectiveness for any value of α.
411
+ CI works under the assumption that the average filter can
412
+ remove the triggering pattern from the poisoned samples, so
413
+ that the prediction of a filtered poisoned sample is different
414
+ from the prediction of the non-filtered one. For this reason, the
415
+ effectiveness of CI is limited to specific kinds of triggering
416
+ patterns, that is, triggers with high-frequency components,
417
+ that can be removed via low pass filtering, e.g., the square
418
+ 3×3 pattern [9] and the sinusoidal [12] pattern shown in Fig.
419
+ 2, whose effect is greatly reduced by a 5×5 average filter. On
420
+ the other hand, the triggering pattern can be designed in such
421
+ a way to be robust against average filtering. This is the case,
422
+ for instance, of the ramp pattern proposed in [12] and shown
423
+ in the right part of Fig. 2. Whenever the average filter fails to
424
+ remove the trigger, CI fails.
425
+ B. Density-based Spatial Clustering of Application with Noise
426
+ (DBSCAN)
427
+ In this paragraph, we describe the Density-based Spatial
428
+ Clustering of Application with Noise (DBSCAN) [26] clus-
429
+ tering algorithm used by CCA-UD. DBSCAN splits a set
430
+ of points into K clusters and possibly few outliers, where
431
+ K is automatically determined by counting the areas with
432
+ high sample density. Specifically, given a point ‘A’ of the
433
+ set, DBSCAN counts the number of neighbours (including ‘A’
434
+ itself) within a distance ϵ from ‘A’. If the number of neighbours
435
+ is larger than or equal to a threshold minPts, ‘A’ is defined
436
+ to be a core point and all points in its ϵ-neighbourhood are
437
+ said to be directly reachable from ‘A’. If a point, say ‘B’, of
438
+ the reachable set is again a core point, all the points in its
439
+ ϵ-neighbours are also reachable from ‘A’. Reachable non-core
440
+ points are said to be border points, while the points which
441
+ are not reachable from any core point are considered to be
442
+ outliers.
443
+ To define a cluster, DBSCAN also introduces the notion of
444
+ density-connectedness. We say that two points ‘A’ and ‘B’ are
445
+ density-connected if there is a point ‘C’ such that ‘A’ and ‘B’ are both reachable from ‘C’ (which then must be a core point). A cluster is defined as a group of points satisfying the following two
448
+ properties: i) the points within a cluster are mutually density-
449
+ connected; ii) any point directly reachable from some point of the cluster is part of the cluster. The intuition behind
451
+ DBSCAN is to define the clusters as dense regions separated
452
+ by border points. The number of dense regions found in the
453
+ set automatically determines the number of clusters K. More
454
+ information about the exact way the clusters are found and the
455
+ (in-)dependence of DBSCAN on the initial point ‘A’ used to
456
+ start the definition of core and reachable points, are given in
457
+ the original paper [26].
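+ For reference, the behaviour described above can be reproduced with a standard DBSCAN implementation; the usage sketch below is not part of the original paper, the toy data are invented, and note that scikit-learn calls minPts "min_samples" and marks outliers with the label -1.
```python
# Sketch: DBSCAN on a toy 2-D point set. Points in dense regions are
# grouped into K clusters (K found automatically); isolated points get
# the special label -1 (outliers).
import numpy as np
from sklearn.cluster import DBSCAN

rng = np.random.default_rng(0)
blob_a = rng.normal(loc=[0.0, 0.0], scale=0.2, size=(200, 2))
blob_b = rng.normal(loc=[3.0, 3.0], scale=0.1, size=(60, 2))   # much smaller cluster
isolated = np.array([[10.0, 10.0]])                            # an outlier
points = np.vstack([blob_a, blob_b, isolated])

labels = DBSCAN(eps=0.8, min_samples=20).fit_predict(points)   # eps ~ epsilon, min_samples ~ minPts
n_clusters = len(set(labels)) - (1 if -1 in labels else 0)
print("clusters:", n_clusters, "outliers:", int(np.sum(labels == -1)))
```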
458
+ The performance of DBSCAN is strongly affected by the
+ choice of the parameters involved in its definition, that is,
460
+ minPts and ϵ, whose setting depends on the problem at hand.
461
+ The influence of such parameters on CCA-UD and the way
462
+ we set them are described in Sect. V-C.
463
+ We choose to adopt a density-based clustering method as
464
+ the backbone of CCA-UD, since density-based clustering is
465
+ known to work well also in the presence of clusters with
466
+ unbalanced size [33], and because it provides an automatic
467
+ way to determine the number of clusters2.
468
+ IV. THE PROPOSED TRAINING-DATASET-LEVEL
469
+ UNIVERSAL DEFENCE
470
+ In this section, we first formalise the defence threat model,
471
+ then, we describe the CCA-UD algorithm.
472
+ A. Defence threat model
473
+ The threat model considered in this work is illustrated in
474
+ Fig. 3. The attacker, called Eve, interferes with the data collec-
475
+ tion process, by poisoning a fraction α of the training dataset,
476
+ possibly modifying the labels of the poisoned samples. Alice,
477
+ plays the role of the trainer. She defines the model architecture,
478
+ the learning algorithm, the model hyperparameters, and trains
479
+ the model using the possibly poisoned dataset. Alice also plays
480
+ the role of the defender: she inspects the training dataset
481
+ and the deployed model to detect the possible presence of
482
+ poisoned samples in the training set. We observe that this is
483
+ the same threat model considered by AC and CI defences in
484
+ [13] and [15]. In the case of CI, however, label corruption is
485
+ not optional, as such defence can be applied only when the
486
+ attacker adopts a corrupted-label modality.
487
+ 2DBSCAN is one of the most popular density-based clustering algorithms;
+ other choices, like OPTICS [34] and HDBSCAN [35], would work as well.
489
+
490
492
+ Fig. 3: Threat model
493
+ The exact goal, knowledge and capabilities of the defender
494
+ are detailed in the following.
495
+ Defender’s goal: Alice aims at revealing the presence of poisoned samples in the training dataset $D^{\alpha}_{tr}$, if any, and identifying them (see footnote 3). Upon detection of the poisoned samples, Alice
499
+ may remove them from the training set and use the clean
500
+ dataset to train a sanitised model.
501
+ Formally, the core of the CCA-UD defence consists of a detector, call it det(), whose functional behaviour is defined as follows. For every subset $D^{\alpha}_{tr,i}$ of the training dataset $D^{\alpha}_{tr}$,
+ $$\det(D^{\alpha}_{tr,i}) = (P_i, B_i), \qquad (3)$$
+ where $P_i$ and $B_i$ are the sets with the samples judged to be respectively poisoned and benign by det(), in class $i$. Extending the above functionality to all the classes in the input domain of the classifier, we may also write:
+ $$\det(D^{\alpha}_{tr}) = \{(P_i, B_i),\ i = 1, ..., l\}. \qquad (4)$$
516
+ Clearly, for a non-poisoned dataset, we should have Pi = ∅ ∀i.
517
+ Defender’s knowledge and capability: Alice can inspect the training dataset $D^{\alpha}_{tr}$, and has white-box access to the
520
+ trained model F α. Moreover, Alice has a small benign val-
521
+ idation dataset Dval, with a small number of non-poisoned
522
+ samples of every class.
523
+ B. The Proposed CCA-UD defence
524
+ CCA-UD consists of two main blocks: feature clustering
525
+ and Poisoned Cluster Detection (PCD), as shown in Fig. 4.
526
+ 1) Dimensionality reduction and feature clustering: Sample
527
+ clustering works in three steps. As a first step, for every class
528
+ i, we compute the feature representations of all the samples in $D^{\alpha}_{tr,i}$, namely $\{f_1^{\alpha}(x_j),\ x_j \in D^{\alpha}_{tr,i}\}$; $f_1^{\alpha}(x_j)$ is a d-dim vector. Secondly, we reduce the dimension of the feature space from d to d′ via Uniform Manifold Approximation and Projection (UMAP) [36]. Finally, we apply DBSCAN to split $D^{\alpha}_{tr,i}$ into multiple clusters $C_i^k$ ($k = 1, ..., K_i$). In addition to clusters, DBSCAN may also return a number of outliers. The set with the outlier samples, referred to as $O_i$, is directly added to $P_i$. The outlier ratio for the class $i$ is denoted by $\zeta_i = |O_i| / |D^{\alpha}_{tr,i}|$. With the hyperparameters (d′, minPts and ϵ) we have chosen, $\zeta_i$ is usually very small (see S7 of Table I).
548
+ Regarding dimensionality reduction, we found it to be
549
+ beneficial for our scheme. First it reduces the time complexity
550
+ of CCA-UD, making it (almost) independent of the original
551
+ dimension d. In addition, we avoid the problem of data
552
+ sparsity, which tends to affect feature representations in large
553
+ dimensions causing the failure of the clustering algorithm
554
+ 3For sake of simplicity, we use the notation Dα
555
+ tr for the training set under
556
+ inspection, even if, prior to inspection, we do not know if the set is poisoned
557
+ or not. For a benign dataset we simply have α = 0.
558
+ (‘curse of dimensionality’ problem [37]). The reduction of
559
+ the dimensionality is only exploited to run the DBSCAN
560
+ clustering algorithm, all the other steps are computed by
561
+ retaining the full feature dimension d.
562
+ The exact setting of the parameters of DBSCAN and d′ is
563
+ discussed in Section VI-A.
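+ A compact sketch of this per-class clustering step is shown below; it is illustrative only, the feature matrix and helper names are assumptions, and Section VI-A discusses the hyperparameter values actually used.
```python
# Sketch of the per-class feature clustering of CCA-UD: UMAP reduces the
# d-dim features to d'=2, DBSCAN splits the class into clusters C_i^k and
# outliers O_i (label -1), and the outliers are flagged as poisoned.
import numpy as np
import umap
from sklearn.cluster import DBSCAN

def cluster_class_features(features_i, d_prime=2, eps=0.8, min_pts=20):
    """features_i: (N, d) array with f_1^alpha(x_j) for all samples of class i."""
    embedded = umap.UMAP(n_components=d_prime).fit_transform(features_i)
    labels = DBSCAN(eps=eps, min_samples=min_pts).fit_predict(embedded)
    clusters = {k: np.where(labels == k)[0] for k in set(labels) if k != -1}
    outliers = np.where(labels == -1)[0]         # O_i, directly added to P_i
    zeta_i = len(outliers) / len(features_i)     # outlier ratio
    return clusters, outliers, zeta_i
```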
564
+ 2) Poisoned cluster detection (PCD): To determine if a cluster $C_i^k$ is poisoned or not, we first compute an average representation of the samples in $C_i^k$, i.e., the cluster’s centroid. Then, we check whether the centroid contains a feature component that causes a misclassification in favour of class $i$ when added to the features of benign samples of the other classes. More specifically, we first calculate the centroid of $C_i^k$ as $\bar{r}_i^k = E[f_1^{\alpha}(x_j)\,|\,x_j \in C_i^k]$, where $E[\cdot]$ denotes component-wise sample averaging. Vector $\bar{r}_i^k$ is a d-dim vector (see footnote 4). Then, we compute the deviation of $\bar{r}_i^k$ from the centroid of class $i$ computed on a set of benign samples:
+ $$\beta_i^k = \bar{r}_i^k - E[f_1^{\alpha}(x_j)\,|\,x_j \in D^i_{val}], \qquad (5)$$
+ where $D^i_{val}$ is the i-th class of the benign set $D_{val}$.
591
+ Finally, we check if $\beta_i^k$ causes a misclassification error in favour of class $i$ when it is added to the feature representation of the benign samples in $D_{val}$ belonging to any class but the i-th one. The corresponding misclassification ratio is computed as follows:
+ $$MR_i^k = \frac{\sum_{x_j \in D_{val} \setminus D^i_{val}} 1\{ f_2^{\alpha}( \delta( f_1^{\alpha}(x_j) + \beta_i^k ) ) \equiv i \}}{|D_{val} \setminus D^i_{val}|}, \qquad (6)$$
+ where $D_{val} \setminus D^i_{val}$ represents the validation dataset excluding the samples from class $i$, and $\delta$ is a ReLu operator included to ensure that $f_1^{\alpha}(x_j) + \beta_i^k$ is a correct vector in the latent space (see footnote 5).
622
+ For a given threshold $\theta$, if $MR_i^k \geq 1 - \theta$ (see footnote 6), the corresponding $C_i^k$ is judged poisoned and its elements are added to $P_i$. Otherwise, the cluster is considered benign and its elements are added to $B_i$. Given that $MR_i^k$ takes values in $[0, 1]$, the threshold $\theta$ is also chosen in this range.
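+ The following sketch summarises the PCD rule of Eqs. (5)-(6); it is an illustration under simplifying assumptions: `f2` stands for the classification head of the network, `val_feats`/`val_labels` for the benign validation data, and the batch interface is hypothetical.
```python
# Sketch of Poisoned Cluster Detection (PCD) for one cluster C_i^k of class i.
import numpy as np

def pcd_is_poisoned(cluster_feats, class_i, f2, val_feats, val_labels, theta=0.95):
    """cluster_feats: (M, d) features of the cluster; f2 maps features to labels."""
    centroid = cluster_feats.mean(axis=0)                           # r_bar_i^k
    benign_centroid = val_feats[val_labels == class_i].mean(axis=0)
    beta = centroid - benign_centroid                               # Eq. (5)

    other = val_labels != class_i                                   # D_val \ D_val^i
    shifted = np.maximum(val_feats[other] + beta, 0.0)              # delta(.) = ReLu
    preds = f2(shifted)                                             # f_2^alpha on shifted features
    mr = float(np.mean(preds == class_i))                           # Eq. (6)
    return mr >= 1.0 - theta                                        # poisoned if MR is large
```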
631
+ 3) Expected behaviour of CCA-UD for clean- and corrupted-label attacks: An intuition of the idea behind CCA-
639
+ UD, and the reason why detection of poisoned samples works
640
+ for both corrupted-label and non-corrupted-label attacks is given
641
+ in the following. Let us focus first on the clean-label attack
642
+ scenario. If cluster $C_i^k$ is poisoned, the centroid $\bar{r}_i^k$ contains the features of the trigger in addition to the features of class $i$. Then, arguably, the deviation of the centroid from the average representation of class $i$ is a significant one. Ideally, subtracting from $\bar{r}_i^k$ the average feature representation of the i-th class, obtaining $\beta_i^k$, isolates the trigger features. The basic idea behind CCA-UD is that the trigger features in $\beta_i^k$ will cause a misclassification in favour of class $i$, when added to the features of benign samples of the other classes. On the
656
+ 4We remind that, although clustering is applied in the reduced-dimension
657
+ space, the analysis of the clusters is performed in the full features space.
658
+ 5As we mentioned in Section II, any sample from the latent space should
659
+ be a positive vector.
660
+ 6We defined the threshold as 1−θ to ensure that TPR and FPR increase
661
+ with the growth of θ as for AC and CI, so to ease the comparison between
662
+ the various defences.
663
+
664
+ [Fig. 4 diagram: for every class i, feature clustering in the reduced space (d′) yields clusters $C_i^k$ ($k = 1, ..., K_i$) and outliers $O_i$; the outliers are added to $P_i$, and Poisoned Cluster Detection (PCD) marks each cluster $C_i^k$ as benign (added to $B_i$) or poisoned (added to $P_i$).]
682
+ Fig. 4: Workflow of the CCA-UD defence.
683
+ contrary, if cluster $C_i^k$ is benign, the centroid $\bar{r}_i^k$ approximates the average feature representation of the i-th class and then $\beta_i^k$ has a very small magnitude. In this case, $\beta_i^k$ accounts for
690
+ normal intra-class fluctuation of the features and its addition to
691
+ benign samples is not expected to induce a misclassification.
692
+ Similar arguments, with some noticeable differences, hold
693
+ in the case of corrupted-label attacks. As before, for a benign cluster $C_i^k$, $\bar{r}_i^k$ approximates the average feature representation of the i-th class and then $\beta_i^k$ corresponds to minor intra-class variations. In the case of a poisoned cluster $C_i^k$, the cluster
701
+ now includes mislabeled samples of the other classes (different
702
+ from i) containing the triggering pattern. In this way, the
703
+ cluster representative contains features of the original class
704
+ in addition to the features of the triggering pattern. Two cases
705
+ are possible here. In the first case, the clustering algorithm
706
+ clusters all the poisoned samples in the same cluster. In this
707
+ case, the features of the original class will tend to cancel out
708
+ while the features of the triggering pattern will be reinforced
709
+ by the averaging operator. As a consequence, the deviation vector $\beta_i^k$ will be dominated by the triggering features, thus
712
+ producing a behaviour similar to that we have described for
713
+ the clean label attacks. In the second case, poisoned samples
714
+ originating from different classes are clustered separately. In
715
+ this case, the deviation vector will contain the features of the
716
+ triggering pattern and the features related to the difference
717
+ between the original class i and the target class t. The network,
718
+ however, has been trained to recognize the triggering pattern
719
+ as a distinguishing feature of class t, hence, once again, the
720
+ addition of the deviation vector to benign samples is likely to
721
+ cause a misclassification in favour of class t.
722
+ The situation is pictorially illustrated in Fig. 5 for a 3
723
+ dimension case, in the case of a clean-label attack (a similar
724
+ picture can be drawn in the corrupted label case). Class ‘3’
725
+ corresponds to the poisoned class. Due to the presence of the
726
+ backdoor, the poisoned samples are characterised by a non-null
727
+ feature component along the z direction. Due to the presence
728
+ of such a component, the backdoored network classifies those
729
+ samples in class ‘3’. On the contrary, benign samples lie in
730
+ the x-y plane. When applied to the samples labeled as class-3 samples, DBSCAN identifies two clusters, namely $C_3^1$ and $C_3^2$, where the former is a benign cluster and the latter is
735
+ a poisoned cluster containing a non-null z-component. When the PCD module is applied to $C_3^1$ (left part in the figure), the deviation from the set of benign samples of class $i$ ($\beta_3^1$) has a small amplitude and lies in the x-y plane, hence when $\beta_3^1$ is added to the other clusters it does not cause a misclassification error. Instead, when the PCD module is applied to $C_3^2$ (right part in the figure), the deviation vector ($\beta_3^2$) contains a significant component in the z direction, causing a misclassification when added to the benign samples in $D^1_{val}$ and $D^2_{val}$.
751
+ It is worth stressing that the idea behind CCA-UD indirectly
752
+ exploits a known behaviour induced by backdoor attacks, that
753
+ is, the fact that the presence of the triggering pattern creates a
754
+ kind of ’shortcut’ to the target class [38]. Since this is a general
755
+ property of backdoor attacks, common to both corrupted-label
756
+ and clean-label attack methods, the proposed method is a
757
+ general one and can work under various settings.
758
+ 4) Discussion: We observe that the universality of CCA-
759
+ UD essentially derives from the generality of the proposed
760
+ strategy for PCD and from the use of DBSCAN, that has the
761
+ following main strengths. Firstly, differently from K-means,
762
+ DBSCAN can handle unbalanced clusters. Then, CCA-UD
763
+ also works when the poisoning ratio α is small. Moreover,
764
+ CCA-UD also works when the number of poisoned samples is
765
+ larger than the number of benign samples. Secondly, CCA-UD
766
+ also works when the class samples have large intra-variability.
767
+ In this scenario, DBSCAN groups the data of a benign class
768
+ into multiple clusters (a large Ki, Ki > 2, is estimated by
769
+ DBSCAN), that are then detected as benign clusters. In this
770
+ setting, methods assuming that there are only two clusters, a
771
+ benign cluster and a poisoned one, do not work.
772
+ Finally, we observe that, thanks to the fact that $K_i$ is directly
+ estimated by DBSCAN, in principle our method can also work
774
+ in the presence of multiple triggering patterns [39], [40]. In this
775
+ case, the samples poisoned by different triggers would cluster
776
+ in separate clusters, that would all be detected as poisoned by
777
+ CCA-UD7.
778
+ V. EXPERIMENTAL METHODOLOGY
779
+ In this section, we describe the methodology we followed
780
+ for the experimental analysis.
781
+ A. Evaluation Metrics
782
+ The performance of the backdoor attacks are evaluated by
783
+ providing the accuracy of the backdoored model F α on benign
784
+ data and the success rate of the attack when the model is tested
785
+ on poisoned data. The two metrics are formalized below.
786
+ • The Accuracy (ACC) measures the probability of a correct classification of benign samples, and is calculated as follows:
+ $$ACC = \frac{\sum_{i=1}^{l} \sum_{x_j \in D_{ts,i}} 1\{F^{\alpha}(x_j) \equiv i\}}{|D_{ts}|}, \qquad (7)$$
797
+ 7We do not focus on the case of multiple triggers in our experiments,
798
+ leaving this analysis for future work.
799
+
800
866
+ Fig. 5: Pictorial and simplified illustration of PCD (clean-label case). For class ‘3’, corresponding to the poisoned class, DBSCAN identifies two clusters, namely $C_3^1$ and $C_3^2$, where the former is a benign cluster and the latter is a poisoned cluster containing a feature component related to the triggering pattern (z component in the picture). When PCD is applied to $C_3^1$ (left part), the deviation from the set of benign samples of class $i$ ($C(D^3_{val})$) has a small amplitude and lies in the x-y plane, hence when the deviation is added to the other clusters it does not cause a misclassification error. Instead, when PCD is applied to $C_3^2$ (right part), the deviation vector contains a significant component in the z direction, causing a misclassification when added to the benign samples in $D^1_{val}$ and $D^2_{val}$.
880
+ • The Attack success rate (ASR), measuring the probability that the triggering pattern $\upsilon$ activates the desired behaviour of the backdoored model $F^{\alpha}$, is computed as follows:
+ $$ASR = \frac{\sum_{x_j \in D_{ts} \setminus D_{ts,t}} 1\{F^{\alpha}(P(x_j, \upsilon)) \equiv t\}}{|D_{ts} \setminus D_{ts,t}|}, \qquad (8)$$
+ where $D_{ts} \setminus D_{ts,t}$ is the test dataset excluding the samples from class $t$.
891
+ In our experiments, a backdoor attack is considered successful
892
+ when both ACC and ASR are greater than 90%.
893
+ To measure the performance of the defence algorithms, we measure the True Positive Rate (TPR) and the False Positive Rate (FPR) of the defence. Actually, when $i$ corresponds to a benign class, there are no poisoned samples in $D^{\alpha}_{tr,i}$ and only the FPR is computed. More formally, let $GP_i$ (resp. $GB_i$) define the set of ground-truth poisoned (resp. benign) samples in $D^{\alpha}_{tr,i}$. We define the TPR and FPR on $D^{\alpha}_{tr,i}$ as follows:
+ $$TPR(D^{\alpha}_{tr,i}) = \frac{|P_i \cap GP_i|}{|GP_i|}, \quad FPR(D^{\alpha}_{tr,i}) = 1 - \frac{|B_i \cap GB_i|}{|GB_i|}, \qquad (9)$$
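+ For clarity, the per-class metrics of Eq. (9) can be computed as in the sketch below; the boolean index arrays, variable names and the per-sample score used for the AUC sweep are illustrative assumptions.
```python
# Sketch: TPR/FPR of Eq. (9) for one class, plus the AUC over a threshold sweep.
import numpy as np
from sklearn.metrics import roc_auc_score

def tpr_fpr(pred_poisoned, gt_poisoned):
    """Both arguments are boolean arrays over the samples of one class."""
    gp, gb = gt_poisoned, ~gt_poisoned
    tpr = np.sum(pred_poisoned & gp) / max(np.sum(gp), 1)           # |P_i ∩ GP_i| / |GP_i|
    fpr = 1.0 - np.sum(~pred_poisoned & gb) / max(np.sum(gb), 1)    # 1 - |B_i ∩ GB_i| / |GB_i|
    return tpr, fpr

def auc(scores, gt_poisoned):
    """scores: per-sample 'poisonedness' scores swept against the threshold."""
    return roc_auc_score(gt_poisoned.astype(int), scores)
```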
911
+ Given that benign classes may exist for both poisoned and
912
+ benign datasets8, we need to distinguish between these two
913
+ cases. Hence, we introduce the following definitions:
914
+ • Benign Class of Benign dataset (BCB): a class of a clean
915
+ dataset. In this case α = 0 and $D^{\alpha}_{tr,i}$ includes only benign samples.
918
+ • Benign Class of Poisoned dataset (BCP ): a benign class of
919
+ a poisoned dataset, that is, a class in a poisoned dataset
920
+ different from the target class. Also in this case, $D^{\alpha}_{tr,i}$ includes only benign samples.
923
+ The difference between BCB and BCP is that in the former
924
+ case F α is a clean model, while in the latter it is backdoored.
925
+ In the following, we use FPR(BCB) and FPR(BCP ) to
926
+ distinguish the FPR in the two cases.
927
+ 8The backdoor attack does not need to target all classes in the input domain.
928
+ Similarly, the case of a target class t of a poisoned dataset is
929
+ referred to as a Poisoned Class (PC) of a poisoned dataset. In
930
+ this case, $D^{\alpha}_{tr,i=t}$ includes both poisoned and benign samples,
932
+ then we compute and report TPR(PC) and FPR(PC).
933
+ TPR and FPR depend on the choice of the threshold θ. Every
934
+ choice of the threshold defines a different operating point of
935
+ the detector. In order to get a global view of the performance
936
+ of the tested systems, then, we provide the AUC value, defined
937
+ as the Area Under the Curve obtained by varying the value of
938
+ the threshold and plotting TPR as a function of FPR. AUC
939
+ values range in the [0, 1] interval. The higher the AUC the
940
+ better the capability of the system to distinguish poisoned and
941
+ benign samples. When AUC = 1 we have a perfect detector,
942
+ while AUC = 0.5 corresponds to a random detector. In our
943
+ experiments, we report the AUC value score of the PC case
944
+ only, because in the BCB and BCP cases the true positive
945
+ rate cannot be measured.
946
+ According to the definitions in (9), the false positive and
947
+ true positive rates are computed for each cluster. For sake
948
+ of simplicity, we will often report average values. For the
949
+ case of benign clusters of a benign dataset, the average value,
950
+ denoted by FPR(BCB), is calculated by averaging over all
951
+ the classes of the benign training dataset. To compute the
952
+ average metrics in the case of BCP and PC, we repeat the
953
+ experiments several times by poisoning different target classes
954
+ with various poisoning ratios α in the range (0, 0.55] for every
955
+ target class, and by using the poisoned datasets to train the
956
+ backdoored models9. Then, the average quantity FPR(BCP )
957
+ is computed by averaging the performance achieved on non-
958
+ target classes of all the poisoned training datasets. For the PC
959
+ case, the average metrics FPR(PC), TPR(PC) and AUC
960
+ are computed by averaging the values measured on the target
961
+ classes of the poisoned training datasets. We also measured the
962
+ average performance achieved for a fixed poisoning ratio α, by
963
+ varying only the target class t. When we want to stress the
964
+ 9Only successful backdoor attacks are considered to measure the perfor-
965
+ mance in the various cases.
966
+
967
969
+ dependency of a metric on the threshold θ and the poisoning
970
+ ratio α, we respectively add a subscript to the metrics as
971
+ follows: FPRα(BCP ), FPRα(PC), TPRα(PC), AUCα.
972
+ The tests run to set the detection threshold θ are carried out
973
+ on the validation dataset, consisting only of benign samples.
974
+ Therefore, for each class $D^i_{val}$, we can only calculate the $FPR(D^i_{val})$ value, and its average counterpart denoted by $FPR(D_{val}) = \sum_i FPR(D^i_{val})/l$.
981
+ B. Network tasks and attacks
982
+ We considered three different classification tasks, namely
983
+ MNIST, traffic sign, and fashion clothes classification.
984
+ 1) MNIST classification: In this set of experiments we
985
+ trained a model to classify the digits in the MNIST dataset
986
+ [41], which includes n = 10 digits (classes) with 6000 binary
987
+ images per class. The size of the images is 28 × 28. The
988
+ architecture used for the task is a 4-layer network [42]. The
989
+ feature representation of dimensionality 128 is obtained from
990
+ the input of the final Fully-connected (FC) layer.
991
+ Regarding the attack setting, three different backdoor attacks
992
+ have been considered, as detailed below. For each setting,
993
+ the training dataset is poisoned by considering 16 poisoning
994
+ ratios α chosen in (0, 0.55]. For each α, 10 different poisoned
995
+ training datasets are generated by choosing different classes
996
+ as the target class.
997
+ • Corrupted-label attack, with a 3×3 pixel trigger (abbrev.
998
+ 3×3 corrupted): the backdoor is injected by adding a 3×3
999
+ pixel pattern to the corrupted samples, as shown in Fig. 2,
1000
+ and modifying the sample labels into that of the target class.
1001
+ • Corrupted-label attack, with ramp trigger (abbrev. ramp
1002
+ corrupted): Eve performs a corrupted-label backdoor attack
1003
+ using a horizontal ramp pattern [12] as trigger (see Fig. 2).
1004
+ The ramp pattern is defined as υ(i, j) = j∆/W, 1 ≤ i ≤ H,
1005
+ 1 ≤ j ≤ W, where H × W is the size of the image and
1006
+ ∆ is a parameter controlling the slope (and strength) of the
1007
+ ramp. We set ∆ = 40 in the experiments.
1008
+ • Clean-label attack, with 3×3 pixel trigger (abbrev. 3×3
1009
+ clean): the attack utilises the 3×3 pixel trigger pattern to
1010
+ perform a clean-label attack.
1011
+ 2) Traffic signs: For the traffic sign classification task, we
1012
+ selected 16 different classes from the GTSRB dataset, namely,
1013
+ the most representative classes in the dataset, including 6
1014
+ speed-limit, 3 prohibition, 3 danger, and 4 mandatory signs.
1015
+ Each class has 1200 colour images with size 28 × 28. The
1016
+ model architecture used for training is based on ResNet18
1017
+ [43]. The feature representation is extracted from the 17-th
1018
+ layer, that is, before the FC layer, after an average pooling
1019
+ layer and ReLu activation. With regard to the attack, we
1020
+ considered the corrupted-label scenario. As triggering pattern,
1021
+ we considered a horizontal sinusoidal pattern, defined as
1022
+ υ(i, j) = ∆ sin(2πjf/W), 1 ≤ i ≤ H, 1 ≤ j ≤ W, where
1023
+ H × W is the size of input image. The parameters ∆ and f
1024
+ are used to control the strength and frequency of the trigger.
1025
+ In our experiment, we set ∆ = 20 and f = 6. As before, for a
1026
+ given α, the network is trained on 16 poisoned datasets, each
1027
+ time considering a different target class.
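+ For reference, the ramp and sinusoidal triggers defined above can be generated as in the following sketch (illustrative code, using the Δ and f values quoted in the text; the additive, clipped embedding of the trigger into the host image is an assumption about the poisoning function):
```python
# Sketch: horizontal ramp and sinusoidal triggering patterns v(i, j).
import numpy as np

def ramp_trigger(H, W, delta=40.0):
    j = np.arange(1, W + 1)
    return np.tile(j * delta / W, (H, 1))              # v(i, j) = j*Delta/W

def sinusoidal_trigger(H, W, delta=20.0, f=6):
    j = np.arange(1, W + 1)
    return np.tile(delta * np.sin(2 * np.pi * j * f / W), (H, 1))   # v(i, j) = Delta*sin(2*pi*j*f/W)

def poison(image, trigger):
    # Additive embedding of the trigger, clipped to the valid pixel range.
    return np.clip(image.astype(np.float32) + trigger, 0, 255)
```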
1028
+ 3) Fashion clothes: Fashion-MNIST dataset includes 10
1029
+ classes of grey-level cloth images, each class consisting of
1030
+ 6000 images of size 28×28. The model architecture used for
1031
+ the classification is based on AlexNet [44]. The representation
1032
+ used by the backdoor detector is extracted from the 5-th layer,
1033
+ at the output of the ReLu activation layer before the first FC
1034
+ layer. With regard to the attack, the poisoned samples are
1035
+ generated by performing the attack in a clean-label setting.
1036
+ A ramp trigger with ∆ = 256 is used to implement the
1037
+ attack. Once again, for each choice of α, the backdoor attack
1038
+ is repeated 10 times, each time considering a different target
1039
+ class.
1040
+ For all the classification tasks, the benign validation dataset
1041
+ Dval is obtained by randomly selecting 100 samples from all
1042
+ the classes in the dataset.
1043
+ C. Setting of defence parameters
1044
+ To implement the CCA-UD defence, we have to set the
1045
+ following parameters: the reduced dimension d′ for the clus-
1046
+ tering, the parameters of the DBSCAN algorithm, namely
1047
+ minPts and ϵ, and finally the threshold θ used by the
1048
+ clustering poisoning detection module. In our experiments, we
1049
+ set d′ = 2, minPts = 20 and ϵ = 0.8. This is the setting that,
1050
+ according to our experiments, achieves the best performance
1051
+ with the minimum complexity for the clustering algorithm
1052
+ (being d′ = 2). The effect of these parameters on the result of
1053
+ clustering and the detection performance is evaluated by the
1054
+ ablation study described in Section VI-A.
1055
+ With regard to θ, as mentioned before, AC, CI and CCA-
1056
+ UD involve the setting of a threshold for poisoning detection.
1057
+ For a fair comparison, we set the threshold in the same way
1058
+ for all the methods. In particular, we set θ by fixing the false
1059
+ positive rate. In general a value of θ results in different FPR
1060
+ rates for different classes. To avoid setting a different threshold
1061
+ for each class, then, we fixed it by setting the average FPR.
1062
+ In fact, setting the average FPR exactly may not be feasible,
1063
+ so we chose the threshold in such a way to minimize the
1064
+ distance from the target rate. Formally, by setting the target
1065
+ false positive rate to 0.05, the threshold θ∗ is determined as:
1066
+ $$\theta^{*} = \arg\min_{\theta} \left| 0.05 - FPR(D_{val}) \right|. \qquad (10)$$
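+ In practice, θ∗ can be found with a simple grid search over candidate thresholds, as in the sketch below (the candidate grid and the FPR(Dval) helper are illustrative assumptions):
```python
# Sketch: choose theta* minimising |0.05 - FPR(D_val)|, as in Eq. (10).
import numpy as np

def select_threshold(fpr_on_val, candidates=np.linspace(0.0, 1.0, 201), target=0.05):
    """fpr_on_val(theta) -> average FPR measured on the benign validation set."""
    fprs = np.array([fpr_on_val(t) for t in candidates])
    return float(candidates[np.argmin(np.abs(target - fprs))])
```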
1071
+ VI. EXPERIMENTAL RESULTS
1072
+ In this section we report the results of the experiments we
1073
+ have carried out to evaluate the effectiveness of CCA-UD.
1074
+ A. Ablation study
1075
+ We start the experimental analysis with an ablation study
1076
+ investigating the effect of the three main hyperparameters of
1077
+ CCA-UD, namely d′ (regarding UMAP), and minPts and ϵ
1078
+ (for DBSCAN) on the effectiveness of the method. Based on
1079
+ this analysis, in all subsequent experiments we set d′ = 2,
1080
+ minPts = 20 and ϵ = 0.8.
1081
+ The influence of each parameter on the clustering result
1082
+ and the detection performance can be assessed by looking at
1083
+ Table I. The results refer to the case of MNIST classification,
1084
+ with backdoor poisoning performed by using a 3×3 pixel
1085
+
1086
+ JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2021
1087
+ 9
1088
+ TABLE I: Ablation study on the three hyperparameters of CCA-UD. FPR and TPR for all cases are computed by letting
1089
+ θ = θ∗ as stated in Eq. (10). K and ζ are, respectively, the average number of clusters and the average fraction of outliers
1090
+ identified by DBSCAN.
1091
+ Setting | d′ | minPts | ϵ | BCB (K, ζ) | FPR(BCB) | BCP (K, ζ) | FPR(BCP) | PC (K, ζ) | TPR(PC) | FPR(PC) | AUC
+ S1 | 2 | 20 | 0.4 | (2.9, 0.005) | 0.050 | (4.3, 0.008) | 0.073 | (9.7, 0.003) | 1.000 | 0.046 | 0.998
+ S2 | 4 | 20 | 0.4 | (30.4, 0.097) | 0.044 | (22.6, 0.060) | 0.027 | (12.9, 0.012) | 0.432 | 0.006 | 0.989
+ S3 | 8 | 20 | 0.4 | (37.4, 0.142) | 0.066 | (23.7, 0.076) | 0.037 | (13.4, 0.012) | 0.448 | 0.007 | 0.990
+ S4 | 10 | 20 | 0.4 | (39.3, 0.153) | 0.057 | (24.5, 0.085) | 0.049 | (13.8, 0.013) | 0.501 | 0.010 | 0.987
+ S5 | 2 | 3 | 0.4 | (2.0, 0.000) | 0.050 | (2.2, 0.000) | 0.051 | (8.0, 0.000) | 1.000 | 0.050 | 1.000
+ S6 | 2 | 10 | 0.4 | (2.3, 0.001) | 0.050 | (2.6, 0.002) | 0.050 | (8.5, 0.001) | 1.000 | 0.050 | 0.999
+ S7 | 2 | 20 | 0.8 | (1.3, 0.000) | 0.050 | (1.6, 0.000) | 0.050 | (6.2, 0.000) | 1.000 | 0.050 | 1.000
+ S8 | 2 | 20 | 1.0 | (1.3, 0.000) | 0.049 | (1.6, 0.000) | 0.050 | (4.6, 0.000) | 1.000 | 0.049 | 1.000
+ S9 | 2 | 20 | 10.0 | (1.0, 0.000) | 0.050 | (1.0, 0.000) | 0.050 | (1.0, 0.000) | 1.000 | 1.000 | 0.500
+ S10 | 10 | 5 | 0.4 | (15.5, 0.004) | 0.049 | (9.5, 0.002) | 0.068 | (11.9, 0.001) | 1.000 | 0.046 | 0.999
+ S11 | 10 | 10 | 0.4 | (17.8, 0.020) | 0.052 | (11.7, 0.012) | 0.077 | (10.6, 0.004) | 1.000 | 0.030 | 0.996
+ S12 | 10 | 20 | 0.2 | (29.2, 0.883) | 0.049 | (60.7, 0.732) | 0.045 | (111.3, 0.399) | 0.053 | 0.031 | 0.612
+ S13 | 10 | 20 | 0.6 | (2.0, 0.008) | 0.046 | (3.0, 0.004) | 0.042 | (7.6, 0.001) | 1.000 | 0.042 | 0.999
+ S14 | 10 | 20 | 1.0 | (1.2, 0.000) | 0.050 | (1.5, 0.000) | 0.050 | (6.2, 0.000) | 1.000 | 0.049 | 1.000
+ S15 | 10 | 20 | 3.0 | (1.1, 0.000) | 0.050 | (1.5, 0.000) | 0.050 | (3.9, 0.000) | 1.000 | 0.050 | 1.000
+ S16 | 10 | 20 | 10.0 | (1.0, 0.000) | 0.050 | (1.0, 0.000) | 0.050 | (1.0, 0.000) | 1.000 | 1.000 | 0.500
1298
+ trigger pattern with label corruption. Similar considerations
1299
+ can be drawn in the other settings. The results in the table have
1300
+ been obtained by letting θ = θ⋆ as stated in Eq. (10). To start
1301
+ with, we observe that when utilising θ∗ in BCB and BCP
1302
+ cases, the FPR values are close to 0.05 for all the settings,
+ while in the PC case FPR is close to or less than 0.05 for
+ all settings except for S9 and S16, where benign and poisoned
1305
+ samples collapse into a single cluster. In addition to TPR and
1306
+ FPR, the table shows the average number of clusters (K) and
1307
+ the average outlier ratio (ζ) identified by DBSCAN.
1308
+ From the first group of rows (S1-S4), we see that for a
1309
+ given setting of minPts and ϵ, increasing d′ leads to a larger
1310
+ average number of clusters and a larger fraction of outliers,
1311
+ as the DBSCAN algorithm results in a higher number of
1312
+ densely-connected regions. A similar behaviour is observed
1313
+ by increasing minPts or decreasing ϵ for a given d′ (second
1314
+ and third group of rows in the table). Expectedly, when ϵ
1315
+ is too large, e.g. 10, DBSCAN always results in one cluster
1316
+ thus failing to identify the poisoned samples. Based on the
1317
+ result in Table I, the settings S7 (d′ = 2, minPts = 20,
1318
+ ϵ = 0.8) and S15 (d′ = 10, minPts = 20, ϵ = 3) yield
1319
+ the best performance, the former having lower computational
1320
+ complexity, because of the lower dimension used to cluster
1321
+ the samples in the feature space (d′ = 2 instead of 10).
1322
+ B. Threshold setting
1323
+ The thresholds θ∗ obtained following the approach detailed
1324
+ in Section V-C for AC and CI and CCA-UD, are reported in
1325
+ Table II for the three different classification tasks considered
1326
+ in our experiments. Given that the threshold is set by relying
1327
+ on the validation dataset, it is necessary to verify that the target
1328
+ false positive rate (0.05 in our case) is also obtained on the
1329
+ test dataset. An excerpt of such results is shown in Table IV
1330
+ by referring to MNIST task (a similar behaviour is observed
1331
+ for the other classification tasks).
1332
+ Our experiments reveal that, for AC and CI, the threshold
1333
+ determined via Eq. (10) does not lead to a good operating
1334
+ point when used on the test dataset. In particular, while for
1335
+ CCA-UD, the threshold θ∗ set on the validation dataset yields
1336
+ a similar FPR (around 0.05) in the BCB, BCP and PC
1337
+ TABLE II: Values of θ∗ obtained for the various classification
1338
+ tasks.
1339
+ Method | MNIST | Traffic signs | Fashion clothes
+ AC | 0.335 | 0.404 | 0.301
+ CI | 3.018 | 1.673 | 4.738
+ CCA-UD | 0.950 | 0.950 | 0.950
1355
+ cases, this is not true for AC and CI, for which FPR(BCB),
1356
+ FPR(BCP ) and FPR(PC) are often smaller than 0.05,
1357
+ reaching 0 in many cases. This leads to a poor TPR(PC). In
1358
+ particular, with AC, when α > θ∗, both clusters are classified
1359
+ as benign, and then TPRα(PC) = FPRα(PC) = 0, even
1360
+ when the method would, in principle, be able to provide a
1361
+ perfect discrimination (AUCα ≈ 1). The difficulty in setting
1362
+ the threshold for AC and CI is also evident from the plots in
1363
+ Fig. 6, that report the FPR and TPR values averaged also
1364
+ on α, for different values of the threshold θ. From these plots,
1365
+ we immediately see that a threshold that works in all the cases
1366
+ can never be found for AC and CI.
1367
+ Due to the difficulties encountered to set the detection
1368
+ threshold for AC and CI10, the results at θ∗ for these methods
1369
+ are not reported in the other cases, that is, for traffic sign
1370
+ and fashion clothes classification, for which we report only
1371
+ the AUCα scores. Note that the possibility to set a unique
1372
+ threshold on a benign dataset that also works on poisoned
1373
+ datasets is very important for the practical applicability of a
1374
+ defence. Based on our results, CCA-UD has this remarkable
1375
+ property.
1376
+ C. Results on MNIST
1377
+ In this section, we evaluate the performance of CCA-UD
1378
+ against the three types of backdoor attacks, namely, 3×3
1379
+ corrupted, ramp corrupted, and 3×3 clean. Such performance
+ is compared to that obtained by AC and CI. In Fig. 6, in each
1381
+ row, the three figures report the average performance of AC,
1382
+ CI and CCA-UD. The values of FPR(BCB), FPR(BCP ),
1383
+ TPR(PC) and FPR(PC) are reported for each method,
1384
+ as a function of the detection threshold θ. The behaviour of
1385
+ 10Note that the problem of threshold setting is not addressed in the original
1386
+ papers, since different thresholds are used in the various cases.
1387
+
1388
1390
+ TABLE III: AUC scores of three methods in the three different
1391
+ attacks
1392
+ Method | 3×3 corrupted | Ramp corrupted | 3×3 clean
+ AC | 0.728 | 0.733 | 0.785
+ CI | 0.964 | 0.178 | 0.488
+ CCA-UD | 0.994 | 0.996 | 0.981
1408
+ FPR(Dval), which is utilised to determine the threshold θ∗
1409
+ (at 0.05 of FPR(Dval)), is also reported. The position of θ∗
1410
+ is indicated by a vertical dotted line.
1411
+ By observing the figure, we see that CCA-UD outperforms
1412
+ by far the other two methods in all the settings. In the first
1413
+ setting, we achieve TPR(PC) and FPR(PC) equal to 0.983
1414
+ and 0.051 at the optimal threshold θ∗, with FPR(BCB) =
1415
+ 0.051 and FPR(BCP ) = 0.050. Instead, the performance
1416
+ achieved by AC and CI at their optimal threshold are very
1417
+ poor. Similar results are achieved for the second and third
1418
+ settings. In particular, for the second attack, CCA-UD achieves
1419
+ TPR(PC) and FPR(PC) equal to ( 0.975, 0.050) at θ∗, and
1420
+ (0.966, 0.050) for the third one.
1421
+ For a poisoned dataset, the AUC values obtained in the
1422
+ three settings are provided in Table III. From these results,
1423
+ we argue that CI has good discriminating capability (with
1424
+ an AUC only slightly lower than CCA-UD) against the first
1425
+ attack, but fails to defend against the other two. This is an
1426
+ expected behaviour since CI does not work when the triggering
1427
+ pattern is robust against average filtering, as it is the case of
1428
+ the ramp signal considered in the second attack, or with clean-
1429
+ label attacks, as it is the last setting.
1430
+ Table IV shows the results obtained for different values of
1431
+ the poisoning ratio α for the three different attacks. The values
1432
+ of FPR and TPR have been obtained by letting θ = θ∗.
1433
+ For the clean-label case, due to the difficulty of developing
1434
+ a successful attack [12], [27], [28], the backdoor can be
1435
+ successfully injected in the model only when α is large enough
1436
+ and, in any case, a successful attack could not always be
1437
+ obtained in the 10 repetitions. For this reason, in the third
1438
+ table, we report the number of successfully attacked classes
1439
+ (cnt) with different poisoning ratios. Upon inspection of Table
1440
+ IV, we observe that:
1441
+ • With regard to AC, the behaviour is similar under the three
1442
+ attack scenarios. Good results are achieved for intermediate
1443
+ values of α, namely in the [0.2, 0.3] range. When α < 0.134,
1444
+ instead, AUCα of AC is smaller than 0.786, and close
1445
+ to 0.5 for small α. In particular, AC cannot handle the
1446
+ backdoor attacks for which the poisoning ratio is smaller
1447
+ than 0.1. Moreover, when α > 0.5, AUCα goes to zero,
1448
+ as benign samples are judged as poisoned and vice-versa.
1449
+ Finally, by comparing the AUCα values in Table IVa and Table
+ IVc, we see that AC achieves better performance against the
1451
+ corrupted-label attack than in the clean-label case.
1452
+ • With regard to CI, the detection performance achieved in
1453
+ the first attack scenario (3×3 corrupted) are good for all
1454
+ the values of α, with AUCα larger than 0.96 in most
1455
+ of the cases (with the exception of the smallest α, for
1456
+ which AUCα = 0.876), showing that CI can effectively
1457
+ defend against the backdoor attack in this setting, for every
1458
+ attack poisoning ratio. However, as expected, CI fails in the
1459
+ other settings, with AUCα lower than 0.5 in all the cases,
1460
+ confirming the limitations mentioned in Section III-A2.
1461
+ • Regarding CCA-UD, good results are achieved in all the
+ cases and for every value of α, with a perfect or nearly
+ perfect AUCα in most of the cases. Moreover, by letting
1464
+ θ = θ∗, a very good TPRα(PC) is obtained, larger
1465
+ than 0.95 in almost all the cases, with FPRα(BCP ) and
1466
+ FPRα(PC) around 0.05. Overall, the tables prove the
1467
+ universality of CCA-UD that works very well regardless of
1468
+ the specific attack setting and regardless of the value of α.
1469
+ Note that, since CCA-UD achieves a larger AUCα than AC and
1470
+ CI, CCA-UD outperforms AC and CI not only when θ = θ∗
1471
+ but also when θ is set adaptively.
1472
+ Finally, these results show that CCA-UD can effectively
1473
+ defend against both corrupted and clean-label attacks, thus
1474
+ confirming that the strategy used to detect poisoned clusters
1475
+ exploits a general misclassification behaviour present in both
1476
+ corrupted- and clean-label attacks.
1477
+ D. Results on Traffic Signs
1478
+ Fig. 7a-7c show the average performance of AC, CI, and
1479
+ CCA-UD on the traffic signs task. Similar considerations
1480
+ to the MNIST case can be made. CCA-UD achieves very
1481
+ good average performance at the operating point given by θ∗,
1482
+ where TPR(PC) and FPR(PC) are (0.965, 0.058) (with
+ FPR(BCB) = FPR(BCP ) ≈ 0.08), while for AC and CI
1484
+ a threshold that works well on the average cannot be found.
+ In the case of a poisoned dataset, the average detection AUC
+ is equal to 0.897, 0.958, and 0.993 for AC, CI,
1487
+ and CCA-UD, respectively.
1488
+ We observe that CI gets a good AUC, too. In fact, in
1489
+ this case, given that the size of the input image is 28×28,
1490
+ the triggering pattern, namely the sinusoidal signal, can be
1491
+ effectively removed by a 5 × 5 average filter.
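+ As a quick numerical intuition (our own toy example, with made-up trigger parameters): a local mean filter strongly attenuates a high-frequency sinusoidal trigger, while a slowly varying ramp passes through it almost unchanged, which matches the behaviour of CI reported above.
```python
import numpy as np
from scipy.ndimage import uniform_filter

h = w = 28
x = np.tile(np.arange(w, dtype=float), (h, 1))
sinusoid = 10 * np.sin(2 * np.pi * 6 * x / w)   # hypothetical sinusoidal trigger
ramp = 40 * x / w                               # hypothetical ramp trigger

for name, trig in (("sinusoid", sinusoid), ("ramp", ramp)):
    filtered = uniform_filter(trig, size=5, mode="nearest")
    print(name, round(float(np.abs(trig).mean()), 2), round(float(np.abs(filtered).mean()), 2))
# the 5x5 mean filter nearly removes the sinusoid but leaves the ramp almost intact
```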
1492
+ The results obtained for various α are reported in Table Va.
1493
+ As it can be seen, CCA-UD gets very good performance in
1494
+ terms of TPRα(PC) and FPRα(PC) measured at θ = θ∗
1495
+ in all the cases. The AUCα is also larger than that achieved
1496
+ by AC and CI for all values of α. As observed before, while
1497
+ CI is relatively insensitive to α, the performance of AC drops
1498
+ when α < 0.1 or α > 0.5.
1499
+ E. Results on Fashion Clothes
1500
+ Fig. 7d-7f report the results obtained by AC, CI, and CCA-
1501
+ UD on the fashion clothes task. Once again, the performance
1502
+ achieved by CCA-UD is largely superior to that achieved by
1503
+ AC and CI. In particular, by looking at Fig. 7d-7f, CCA-UD
1504
+ achieves TPR(PC) and FPR(PC) equal to (1.000, 0.053),
1505
+ with FPR(BCB) = FPR(BCP ) ≈ 0.05. Regarding the
1506
+ AUC scores, the AUC values of AC, CI, and CCA-UD are 0.900, 0.106,
+ and 0.997, respectively. Since the attack is carried out in a clean-
+ label modality, the poor performance of CI was expected. The
1509
+ results for various α, reported in Table Vb, confirm the same
1510
+ behaviour, with CCA-UD getting very good performance in
1511
+ all the cases, always overcoming the other two methods.
1512
+
1513
1515
+ (a) AC in 3×3 corrupted
1516
+ (b) CI in 3×3 corrupted
1517
+ (c) CCA-UD in 3×3 corrupted
1518
+ (d) AC in ramp corrupted
1519
+ (e) CI in ramp corrupted
1520
+ (f) CCA-UD in ramp corrupted
1521
+ (g) AC in 3×3 clean
1522
+ (h) CI in 3×3 clean
1523
+ (i) CCA-UD in 3×3 clean
1524
+ Fig. 6: Average performance of AC, CI, and CCA-UD for different values of the threshold against the three types of
1525
+ backdoor attacks implemented in the case of MNIST classification. From top to bottom the plots refer to 3×3 corrupted in
1526
+ (a)-(c), ramp corrupted in (d)-(f), and 3×3 clean in (g)-(i). From left to right we report the performance of AC, CI and
1527
+ CCA-UD. The position of θ∗ is indicated by a vertical dotted line.
1528
+ TABLE IV: Performance of AC, CI and CCA-UD for various poisoning ratios α, against the three types of backdoor attacks
1529
+ for MNIST classification. The FPR and TPR values are computed at θ = θ∗. In the 3×3 clean table, cnt indicates the number of
1530
+ successful attacks in 10 repetitions.
1531
+ α     | AC: FPRα(BCP) TPRα(PC) FPRα(PC) AUCα | CI: FPRα(BCP) TPRα(PC) FPRα(PC) AUCα | CCA-UD: FPRα(BCP) TPRα(PC) FPRα(PC) AUCα
+ 0.025 | 0.025 0.000 0.000 0.563 | 0.012 0.324 0.022 0.876 | 0.050 0.908 0.051 0.949
+ 0.050 | 0.055 0.099 0.000 0.628 | 0.005 0.581 0.001 0.977 | 0.050 0.989 0.050 0.994
+ 0.096 | 0.000 0.395 0.000 0.757 | 0.005 0.654 0.000 0.996 | 0.050 0.999 0.050 0.999
+ 0.134 | 0.000 0.792 0.000 0.958 | 0.009 0.559 0.002 0.990 | 0.051 0.999 0.050 1.000
+ 0.186 | 0.000 0.994 0.000 0.997 | 0.000 0.577 0.001 0.985 | 0.050 1.000 0.050 1.000
+ 0.258 | 0.000 0.993 0.000 0.997 | 0.014 0.540 0.070 0.961 | 0.050 1.000 0.050 1.000
+ 0.359 | 0.000 0.000 0.000 0.998 | 0.000 0.571 0.005 0.964 | 0.050 1.000 0.050 1.000
+ 0.550 | 0.000 0.000 0.000 0.001 | 0.000 0.829 0.000 0.953 | 0.050 1.000 0.050 1.000
+ (a) 3×3 corrupted
1652
+ α     | AC: FPRα(BCP) TPRα(PC) FPRα(PC) AUCα | CI: FPRα(BCP) TPRα(PC) FPRα(PC) AUCα | CCA-UD: FPRα(BCP) TPRα(PC) FPRα(PC) AUCα
+ 0.035 | 0.000 0.050 0.024 0.593 | 0.009 0.000 0.008 0.407 | 0.051 0.871 0.050 0.966
+ 0.050 | 0.024 0.090 0.028 0.593 | 0.000 0.000 0.000 0.119 | 0.050 0.914 0.050 0.998
+ 0.096 | 0.000 0.400 0.000 0.786 | 0.003 0.000 0.000 0.216 | 0.050 0.989 0.050 0.998
+ 0.134 | 0.024 0.798 0.001 0.962 | 0.019 0.000 0.000 0.142 | 0.050 0.999 0.050 0.998
+ 0.186 | 0.000 0.992 0.003 0.995 | 0.107 0.000 0.000 0.179 | 0.051 1.000 0.050 1.000
+ 0.258 | 0.025 0.999 0.000 0.999 | 0.000 0.000 0.000 0.088 | 0.050 1.000 0.050 1.000
+ 0.359 | 0.025 0.000 0.000 0.999 | 0.021 0.000 0.000 0.144 | 0.051 1.000 0.050 1.000
+ 0.550 | 0.000 0.000 0.000 0.002 | 0.004 0.000 0.000 0.135 | 0.050 1.000 0.050 1.000
+ (b) Ramp corrupted
1773
+ α     cnt | AC: FPRα(BCP) TPRα(PC) FPRα(PC) AUCα | CI: FPRα(BCP) TPRα(PC) FPRα(PC) AUCα | CCA-UD: FPRα(BCP) TPRα(PC) FPRα(PC) AUCα
+ 0.050  2  | 0.000 0.000 0.000 0.441 | 0.000 0.683 0.835 0.438 | 0.051 0.642 0.050 0.809
+ 0.069  3  | 0.000 0.000 0.000 0.533 | 0.000 0.667 0.667 0.296 | 0.050 0.952 0.050 0.972
+ 0.096  3  | 0.000 0.000 0.000 0.528 | 0.000 0.333 0.333 0.595 | 0.050 0.951 0.050 0.972
+ 0.134  3  | 0.000 0.000 0.000 0.610 | 0.000 0.667 0.667 0.539 | 0.050 0.975 0.050 0.987
+ 0.186  5  | 0.000 0.384 0.003 0.746 | 0.000 0.600 0.600 0.471 | 0.051 0.982 0.050 0.991
+ 0.258  5  | 0.000 0.929 0.011 0.959 | 0.000 0.601 0.644 0.516 | 0.050 0.994 0.051 0.996
+ 0.359  5  | 0.000 0.315 0.000 0.975 | 0.000 0.206 0.213 0.437 | 0.050 0.993 0.050 0.996
+ 0.450  5  | 0.000 0.000 0.000 0.969 | 0.009 0.729 0.786 0.554 | 0.050 0.997 0.050 0.998
+ (c) 3×3 clean
1903
+
1904
2083
+ (a) AC in traffic signs task
2084
+ (b) CI in traffic signs task
2085
+ (c) CCA-UD in traffic signs task
2086
+ (d) AC in fashion clothes task
2087
+ (e) CI in fashion clothes task
2088
+ (f) CCA-UD in fashion clothes task
2089
+ Fig. 7: Average performance of AC, CI, and CCA-UD for different values of θ for the traffic signs and fashion clothes task.
2090
+ The vertical dotted line indicates the position of θ∗ for the various methods.
2091
+ TABLE V: Performance of AC, CI, and CCA-UD for various
2092
+ poisoning ratios for the traffic signs and fashion clothes tasks. The
2093
+ FPR and TPR values are computed at θ = θ∗. Since for AC
2094
+ and CI it is not possible to find a unique value of θ working
2095
+ in all conditions, we report only the AUC values.
2096
+ α     cnt | AUCα (AC) | AUCα (CI) | CCA-UD: AUCα FPRα(BCP) TPRα(PC) FPRα(PC)
+ 0.050   9 | 0.793 | 0.923 | 0.983 0.073 0.946 0.061
+ 0.096   9 | 0.850 | 0.928 | 0.991 0.058 0.998 0.059
+ 0.134   9 | 0.949 | 0.959 | 0.992 0.057 0.998 0.057
+ 0.186  10 | 0.958 | 0.965 | 0.993 0.064 0.999 0.056
+ 0.359  13 | 0.946 | 0.965 | 0.996 0.086 0.985 0.054
+ 0.450  14 | 0.917 | 0.965 | 0.994 0.070 0.980 0.055
+ 0.550  15 | 0.869 | 0.996 | 0.999 0.059 0.999 0.051
+ (a) Traffic signs
2164
+ α     cnt | AUCα (AC) | AUCα (CI) | CCA-UD: AUCα FPRα(BCP) TPRα(PC) FPRα(PC)
+ 0.069   3 | 0.618 | 0.056 | 0.998 0.053 1.000 0.052
+ 0.096   3 | 0.513 | 0.341 | 0.995 0.054 1.000 0.056
+ 0.134   3 | 0.940 | 0.087 | 0.998 0.059 1.000 0.053
+ 0.186   4 | 1.000 | 0.037 | 0.998 0.054 1.000 0.055
+ 0.258   5 | 1.000 | 0.083 | 0.996 0.055 1.000 0.057
+ 0.359   5 | 1.000 | 0.015 | 0.998 0.056 1.000 0.052
+ 0.450   5 | 1.000 | 0.174 | 1.000 0.055 1.000 0.050
+ (b) Fashion clothes
2232
+ VII. CONCLUDING REMARKS
2233
+ We have proposed a universal backdoor detection method,
2234
+ called CCA-UD, aiming at revealing the possible presence of a
2235
+ backdoor inside a model and identifying the poisoned samples by
2236
+ analysing the training dataset. CCA-UD relies on DBSCAN
2237
+ clustering and on a new strategy for the detection of poisoned
2238
+ clusters based on the computation of clusters’ centroids. The
2239
+ capability of the centroids’ features to cause a misclassification
2240
+ of benign samples is exploited to decide whether a cluster is
2241
+ poisoned or not. We evaluated the effectiveness of CCA-UD
2242
+ on a wide variety of classification tasks and attack scenarios.
2243
+ The results confirm that the method can work regardless of the
2244
+ corruption strategy (corrupted and clean label setting) and the
2245
+ type of trigger used by the attacker (local or global pattern).
2246
+ Moreover, the method is effective regardless of the poisoning
2247
+ ratio used by the attacker, which can be either very small or even
2248
+ larger than 0.5. Furthermore, we proved that the performance
2249
+ achieved by CCA-UD is always superior to that achieved
2250
+ by the existing methods, also when these methods are applied
2251
+ in a scenario that meets their operating requirements.
2252
+ Future work will be devoted to the analysis of the behaviour
2253
+ of the proposed method against multiple-trigger attacks, that
+ is, when multiple triggers are used to poison the samples,
2255
+ possibly to induce more than one malicious behaviour inside
2256
+ the network. The capability of the method to defend against
2257
+ backdoor attacks in application scenarios beyond image clas-
2258
+ sification is also worth investigating.
2259
+ REFERENCES
2260
+ [1] I. J. Goodfellow, J. Shlens, and C. Szegedy, “Explaining and harnessing
2261
+ adversarial examples,” in 3rd International Conference on Learning
2262
+ Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015,
2263
+ Conference Track Proceedings, Y. Bengio and Y. LeCun, Eds., 2015.
2264
+ [2] A. Kurakin, I. J. Goodfellow, and S. Bengio, “Adversarial examples in
2265
+ the physical world,” in 5th International Conference on Learning Rep-
2266
+ resentations, ICLR 2017, Toulon, France, April 24-26, 2017, Workshop
2267
+ Track Proceedings.
2268
+ OpenReview.net, 2017.
2269
+ [3] A. Kurakin, I. Goodfellow, and S. Bengio, “Adversarial machine learning
2270
+ at scale,” arXiv preprint arXiv:1611.01236, 2016.
2271
+ [4] B. Biggio, B. Nelson, and P. Laskov, “Poisoning attacks against support
2272
+ vector machines,” in Proceedings of the 29th International Conference
2273
+ on Machine Learning, ICML 2012, Edinburgh, Scotland, UK, June 26
2274
+ - July 1, 2012.
2275
+ icml.cc / Omnipress, 2012.
2276
+ [5] S. Weerasinghe, T. Alpcan, S. M. Erfani, and C. Leckie, “Defending
2277
+ support vector machines against data poisoning attacks,” IEEE Trans.
2278
+ Inf. Forensics Secur., vol. 16, pp. 2566–2578, 2021.
2279
+ [6] W. Guo, B. Tondi, and M. Barni, “A master key backdoor for univer-
2280
+ sal impersonation attack against dnn-based face verification,” Pattern
2281
+ Recognit. Lett., vol. 144, pp. 61–67, 2021.
2282
+ [7] X. Chen, C. Liu, B. Li, K. Lu, and D. Song, “Targeted backdoor
2283
+ attacks on deep learning systems using data poisoning,” CoRR, vol.
2284
+ abs/1712.05526, 2017.
2285
+ [8] W. Guo, B. Tondi, and M. Barni, “A temporal chrominance trigger for
2286
+ clean-label backdoor attack against anti-spoof rebroadcast detection,”
2287
+ CoRR, vol. abs/2206.01102, 2022.
2288
+ [9] T. Gu, B. Dolan-Gavitt, and S. Garg, “Badnets: Identifying vulner-
2289
+ abilities in the machine learning model supply chain,” CoRR, vol.
2290
+ abs/1708.06733, 2017.
2291
+ [10] W. Guo, B. Tondi, and M. Barni, “An overview of backdoor attacks
2292
+ against deep neural networks and possible defences,” IEEE Open Journal
2293
+ of Signal Processing, vol. 3, pp. 261–287, 2022.
2294
+ [11] A. Turner, D. Tsipras, and A. Madry, “Label-consistent backdoor at-
2295
+ tacks,” arXiv preprint arXiv:1912.02771, 2019.
2296
+ [12] M. Barni, K. Kallas, and B. Tondi, “A new backdoor attack in CNNS
2297
+ by training set corruption without label poisoning,” in 2019 IEEE
2298
+ International Conference on Image Processing, ICIP 2019, Taipei,
2299
+ Taiwan, September 22-25, 2019.
2300
+ IEEE, 2019, pp. 101–105.
2301
+
2302
2425
+ [13] B. Chen, W. Carvalho, N. Baracaldo, H. Ludwig, B. Edwards, T. Lee,
2426
+ I. M. Molloy, and B. Srivastava, “Detecting backdoor attacks on deep
2427
+ neural networks by activation clustering,” in Workshop on Artificial
2428
+ Intelligence Safety 2019 co-located with the Thirty-Third AAAI Con-
2429
+ ference on Artificial Intelligence 2019 (AAAI-19), Honolulu, Hawaii,
2430
+ January 27, 2019, ser. CEUR Workshop Proceedings, H. Espinoza, S. ´O.
2431
+ h´Eigeartaigh, X. Huang, J. Hern´andez-Orallo, and M. Castillo-Effen,
2432
+ Eds., vol. 2301.
2433
+ CEUR-WS.org, 2019.
2434
+ [14] J. Yadav and M. Sharma, “A review of k-mean algorithm,” Int. J. Eng.
2435
+ Trends Technol, vol. 4, no. 7, pp. 2972–2976, 2013.
2436
+ [15] Z. Xiang, D. J. Miller, and G. Kesidis, “A benchmark study of backdoor
2437
+ data poisoning defenses for deep neural network classifiers and A novel
2438
+ defense,” in 29th IEEE International Workshop on Machine Learning
2439
+ for Signal Processing, MLSP 2019, Pittsburgh, PA, USA, October 13-
2440
+ 16, 2019.
2441
+ IEEE, 2019, pp. 1–6.
2442
+ [16] S. R. Bond, A. Hoeffler, and J. R. Temple, “Gmm estimation of empirical
2443
+ growth models,” Available at SSRN 290522, 2001.
2444
+ [17] A. A. Neath and J. E. Cavanaugh, “The bayesian information crite-
2445
+ rion: background, derivation, and applications,” Wiley Interdisciplinary
2446
+ Reviews: Computational Statistics, vol. 4, no. 2, pp. 199–203, 2012.
2447
+ [18] B. Tran, J. Li, and A. Madry, “Spectral signatures in backdoor attacks,”
2448
+ in Advances in Neural Information Processing Systems 31: Annual
2449
+ Conference on Neural Information Processing Systems 2018, NeurIPS
2450
+ 2018, December 3-8, 2018, Montr´eal, Canada, S. Bengio, H. M.
2451
+ Wallach, H. Larochelle, K. Grauman, N. Cesa-Bianchi, and R. Garnett,
2452
+ Eds., 2018, pp. 8011–8021.
2453
+ [19] J. Hayase, W. Kong, R. Somani, and S. Oh, “SPECTRE: defending against backdoor attacks using robust statistics,” CoRR, vol. abs/2104.11315, 2021. [Online]. Available: https://arxiv.org/abs/2104.11315
2478
+ [20] H. Abdi, “Singular value decomposition (svd) and generalized singular
2479
+ value decomposition,” Encyclopedia of measurement and statistics, pp.
2480
+ 907–912, 2007.
2481
+ [21] S. Shan, A. N. Bhagoji, H. Zheng, and B. Y. Zhao, “Poison forensics:
2482
+ Traceback of data poisoning attacks in neural networks,” in 31st USENIX
2483
+ Security Symposium, USENIX Security 2022, Boston, MA, USA, August
2484
+ 10-12, 2022, K. R. B. Butler and K. Thomas, Eds.
2485
+ USENIX Associa-
2486
+ tion, 2022, pp. 3575–3592.
2487
+ [22] N. Peri, N. Gupta, W. R. Huang, L. Fowl, C. Zhu, S. Feizi, T. Goldstein,
2488
+ and J. P. Dickerson, “Deep k-nn defense against clean-label data
2489
+ poisoning attacks,” in Computer Vision - ECCV 2020 Workshops -
2490
+ Glasgow, UK, August 23-28, 2020, Proceedings, Part I, ser. Lecture
2491
+ Notes in Computer Science, A. Bartoli and A. Fusiello, Eds., vol. 12535.
2492
+ Springer, 2020, pp. 55–70.
2493
+ [23] A. Shafahi, W. R. Huang, M. Najibi, O. Suciu, C. Studer, T. Dumitras,
2494
+ and T. Goldstein, “Poison frogs! targeted clean-label poisoning attacks
2495
+ on neural networks,” in Advances in Neural Information Processing
2496
+ Systems 31: Annual Conference on Neural Information Processing
2497
+ Systems 2018, NeurIPS 2018, December 3-8, 2018, Montr´eal, Canada,
2498
+ S. Bengio, H. M. Wallach, H. Larochelle, K. Grauman, N. Cesa-Bianchi,
2499
+ and R. Garnett, Eds., 2018, pp. 6106–6116.
2500
+ [24] D. Tang, X. Wang, H. Tang, and K. Zhang, “Demon in the variant:
2501
+ Statistical analysis of dnns for robust backdoor contamination detection,”
2502
+ in 30th USENIX Security Symposium, USENIX Security 2021, August
2503
+ 11-13, 2021, M. Bailey and R. Greenstadt, Eds.
2504
+ USENIX Association,
2505
+ 2021, pp. 1541–1558.
2506
+ [25] T. K. Moon, “The expectation-maximization algorithm,” IEEE Signal
2507
+ processing magazine, vol. 13, no. 6, pp. 47–60, 1996.
2508
+ [26] M. Ester, H. Kriegel, J. Sander, and X. Xu, “A density-based algorithm
2509
+ for discovering clusters in large spatial databases with noise,” in Pro-
2510
+ ceedings of the Second International Conference on Knowledge Discov-
2511
+ ery and Data Mining (KDD-96), Portland, Oregon, USA, E. Simoudis,
2512
+ J. Han, and U. M. Fayyad, Eds.
2513
+ AAAI Press, 1996, pp. 226–231.
2514
+ [27] S. Zhao, X. Ma, X. Zheng, J. Bailey, J. Chen, and Y.-G. Jiang, “Clean-
2515
+ label backdoor attacks on video recognition models,” in Proceedings of
2516
+ the IEEE/CVF Conference on Computer Vision and Pattern Recognition,
2517
+ 2020, pp. 14 443–14 452.
2518
+ [28] W. Guo, B. Tondi, and M. Barni, “A temporal chrominance trigger for
2519
+ clean-label backdoor attack against anti-spoof rebroadcast detection,”
2520
+ arXiv preprint arXiv:2206.01102, 2022.
2521
+ [29] Y. LeCun, Y. Bengio, and G. Hinton, “Deep learning,” nature, vol. 521,
2522
+ no. 7553, pp. 436–444, 2015.
2523
+ [30] S. Wold, K. Esbensen, and P. Geladi, “Principal component analysis,”
2524
+ Chemometrics and intelligent laboratory systems, vol. 2, no. 1-3, pp.
2525
+ 37–52, 1987.
2526
+ [31] A. Gupta and N. Shekokar, “A novel k-means l-layer algorithm for un-
2527
+ even clustering in wsn,” in 2017 International Conference on Computer,
2528
+ Communication and Signal Processing (ICCCSP).
2529
+ IEEE, 2017, pp.
2530
+ 1–6.
2531
+ [32] J. Goldberger, S. Gordon, H. Greenspan et al., “An efficient image
2532
+ similarity measure based on approximations of kl-divergence between
2533
+ two gaussian mixtures.” in ICCV, vol. 3, 2003, pp. 487–493.
2534
+ [33] L. Rokach and O. Maimon, “Clustering methods,” in Data mining and
2535
+ knowledge discovery handbook.
2536
+ Springer, 2005, pp. 321–352.
2537
+ [34] M. Ankerst, M. M. Breunig, H.-P. Kriegel, and J. Sander, “Optics:
2538
+ Ordering points to identify the clustering structure,” ACM Sigmod
2539
+ record, vol. 28, no. 2, pp. 49–60, 1999.
2540
+ [35] R. J. Campello, D. Moulavi, A. Zimek, and J. Sander, “Hierarchical
2541
+ density estimates for data clustering, visualization, and outlier detection,”
2542
+ ACM Transactions on Knowledge Discovery from Data (TKDD), vol. 10,
2543
+ no. 1, pp. 1–51, 2015.
2544
+ [36] L. McInnes, J. Healy, and J. Melville, “Umap: Uniform manifold
2545
+ approximation and projection for dimension reduction,” arXiv preprint
2546
+ arXiv:1802.03426, 2018.
2547
+ [37] M. K¨oppen, “The curse of dimensionality,” in 5th online world confer-
2548
+ ence on soft computing in industrial applications (WSC5), vol. 1, 2000,
2549
+ pp. 4–8.
2550
+ [38] B. Wang, Y. Yao, S. Shan, H. Li, B. Viswanath, H. Zheng, and B. Y.
2551
+ Zhao, “Neural cleanse: Identifying and mitigating backdoor attacks in
2552
+ neural networks,” in 2019 IEEE Symposium on Security and Privacy,
2553
+ SP 2019, San Francisco, CA, USA, May 19-23, 2019.
2554
+ IEEE, 2019, pp.
2555
+ 707–723.
2556
+ [39] A. Salem, R. Wen, M. Backes, S. Ma, and Y. Zhang, “Dynamic backdoor
2557
+ attacks against machine learning models,” in 2022 IEEE 7th European
2558
+ Symposium on Security and Privacy (EuroS&P). IEEE, 2022, pp. 703–
2559
+ 718.
2560
+ [40] M. Xue, C. He, J. Wang, and W. Liu, “One-to-n & n-to-one: Two
2561
+ advanced backdoor attacks against deep learning models,” IEEE Trans-
2562
+ actions on Dependable and Secure Computing, 2020.
2563
+ [41] Y. LeCun and C. Cortes, “MNIST handwritten digit database,” 2010.
2564
+ [Online]. Available: http://yann.lecun.com/exdb/mnist/
2565
+ [42] Pytorch, “4-layer dnn model,” https://github.com/pytorch/examples/blob/
2566
+ main/mnist/main.py#L11.
2567
+ [43] K. He, X. Zhang, S. Ren, and J. Sun, “Deep residual learning for image
2568
+ recognition,” in Proceedings of the IEEE conference on computer vision
2569
+ and pattern recognition, 2016, pp. 770–778.
2570
+ [44] A. Krizhevsky, “One weird trick for parallelizing convolutional neural
2571
+ networks,” arXiv preprint arXiv:1404.5997, 2014.
2572
+
gNE3T4oBgHgl3EQffgrq/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
hNAzT4oBgHgl3EQf4f5B/content/tmp_files/2301.01844v1.pdf.txt ADDED
@@ -0,0 +1,2136 @@
1
+ Solving Unsplittable Network Flow Problems with
2
+ Decision Diagrams
3
+ Hosseinali Salemi, Danial Davarnia
4
+ Department of Industrial and Manufacturing Systems Engineering, Iowa State University, Ames, IA 50011,
5
+ hsalemi@iastate.edu, davarnia@iastate.edu
6
+ In unsplittable network flow problems, certain nodes must satisfy a combinatorial requirement that the
7
+ incoming arc flows cannot be split or merged when routed through outgoing arcs. This so-called no-split
8
+ no-merge requirement arises in unit train scheduling where train consists should remain intact at stations
9
+ that lack necessary equipment and manpower to attach/detach them. Solving the unsplittable network
10
+ flow problems with standard mixed-integer programming formulations is computationally difficult due to
11
+ the large number of binary variables needed to determine matching pairs between incoming and outgoing
12
+ arcs of nodes with no-split no-merge constraint. In this paper, we study a stochastic variant of the unit
13
+ train scheduling problem where the demand is uncertain. We develop a novel decision diagram (DD)-based
14
+ framework that decomposes the underlying two-stage formulation into a master problem that contains the
15
+ combinatorial requirements, and a subproblem that models a continuous network flow problem. The master
16
+ problem is modeled by a DD in a transformed space of variables with a smaller dimension, leading to a
17
+ substantial improvement in solution time. Similarly to the Benders decomposition technique, the subproblems
18
+ output cutting planes that are used to refine the master DD. Computational experiments show a significant
19
+ improvement in solution time of the DD framework compared with that of standard methods.
20
+ Key words : Decision Diagrams; Network Optimization; Mixed Integer Programs; Unit Trains;
21
+ Transportation
22
+ History :
23
+ 1.
24
+ Introduction
25
+ Over the past several decades, rail freight transportation has continued to grow as the prime
26
+ means of transportation for high-volume commodities. Advantages of rail transportation include
27
+ reliability, safety, cost-efficiency and environmental-sustainability as compared with alternative
28
+ methods of transportation. In terms of scale, the rail network accounted for 27.2 percent of U.S.
29
+ freight shipment by ton-miles in 2018 (Furchtgott-Roth et al. 2021); see Figure 1. The Federal
30
+ Highway Administration estimates that the total U.S. freight shipments will be 24.1 billion tons
31
+ in 2040, a 30 percent increase from the 2018 total transportation of 18.6 billion tons. With the
32
+ purpose of meeting such market growth, America’s freight railway companies have invested nearly
33
35
+
36
38
+ $740 billion on capital expenditures and maintenance from 1980 to 2020 (Association of American
39
+ Railroads 2021).
40
+ Figure 1
41
+ Pie chart for ton-miles of freight shipments by mode within the U.S. in 2018. Multiple modes includes
42
+ mail. Air and truck-air with the share of 0.1% are omitted.
43
+ To reduce rail freight transportation costs and shipment delays, railroad companies offer unit
44
+ train services for carrying high-volume products. Unit trains haul a single type of freight in a way that
45
+ no car is attached or detached while the cargo train is on its way from an origin to a destination,
46
+ except in specific locations that are equipped with required manpower and machinery. These trains
47
+ usually operate all day, use dedicated equipment, and can be loaded/unloaded in 24 hours. They
48
+ are known to be one of the fastest and most efficient means of railroad transportation (Association
49
+ of American Railroads 2021). Traditionally, unit trains are used to carry bulk cargo such as coal,
50
+ grain, cement, and rock. Bulk liquids like crude oil and food such as wheat and corn are also
51
+ shipped by unit trains. According to the Federal Railroad Administration data, bulk commodities
52
+ account for 91 percent of the U.S. railroad freight. Nearly all coal shipped through railways
+ in the U.S. is transported by unit trains. Moreover, these trains contribute significantly to the
54
+ shipping process of crude oil as each unit train is capable of carrying 85,000 barrels (Association
55
+ of American Railroads 2021). At an operational level, the core unit train model can be described
56
+ as follows. Given a set of supply, intermediate, and demand locations in a railroad network, the
57
+ unit train scheduling problem seeks to find optimal routes for unit trains to send flows from supply
58
+ to demand points with the objective of minimizing the total transportation cost while meeting
59
+ demand of customers, respecting capacities of tracks, and satisfying no-car attaching/detaching
60
+ requirements in specific locations. As a result, designing blocking plans to determine locations
61
+
62
+ [Figure 1 pie chart values: Truck 39%, Rail 27%, Pipeline 19%, Multiple modes 8%, Water 7%]
69
+ where cars need to be switched between trains is irrelevant in this problem, unlike scheduling other
70
+ types of trains (Davarnia et al. 2019).
71
+ Despite the significance of unit train scheduling, exact optimization approaches to solve associ-
72
+ ated problems are scarce, partially due to their structural complexities. One of the main challenges
73
+ in modeling unit trains is the requirement that the train consists must remain intact when passing
74
+ through stations that lack necessary busting/formation equipment. In optimization, this require-
75
+ ment is referred to as no-split no-merge (NSNM), which guarantees that the flows entering to or
76
+ exiting from certain nodes of the unit train network cannot be split or merged. Incorporating this
77
+ requirement into typical transportation network models yields the so-called generalized unsplittable
78
+ flow problem (GUFP), where the objective is to determine the minimum-cost unit train sched-
79
+ ules that satisfy the given demand. Numerous studies have shown that considering deterministic
80
+ demands might result in the complete failure of the transportation scheduling (Demir et al. 2016,
81
+ Layeb et al. 2018), motivating the study of stochastic variants of the unit train scheduling problems
82
+ where the demand is uncertain. As a result, in this paper, we consider a stochastic variant of the
83
+ GUFP, referred to as the SGUFP, which is modeled as a two-stage optimization problem. The first stage
84
+ decides a matching between the incoming and outgoing arcs of the nodes of the railroad network,
85
+ and the second stage determines the amount of flow that should be sent through the matching arcs
86
+ of the network to satisfy the uncertain demand represented by a number of demand scenarios. We
87
+ propose a novel exact solution framework to solve this problem at the operational level.
88
+ Our proposed methodology is based on decision diagrams (DDs), which are compact graphical
89
+ data structures. DDs were initially introduced to represent boolean functions with applications in
90
+ circuit design. Over the past decade, researchers have successfully extended the domain of DDs by devel-
+ oping DD-based algorithms to solve discrete optimization problems in different areas of application.
+ Because of their structural limitation to modeling integer programs only, DDs have never been used
93
+ to solve transportation problems that inherently include continuous variables. In this paper, we
94
+ extend the application scope of DDs by introducing a novel framework that is capable of modeling
95
+ network problems with both integer and continuous components as in the SGUFP.
96
+ 1.1.
97
+ Literature Review on Train Scheduling
98
+ Many variants of train routing and scheduling problems with different objective functions and
99
+ set of constraints under deterministic and stochastic conditions have been introduced and vastly
100
+ studied in the literature; see surveys by Cordeau, Toth, and Vigo (1998), Harrod and Gorman
101
+ (2010), Lusby et al. (2011), Cacchiani and Toth (2012), and Turner et al. (2016) for different
102
+ problems classifications and structures. Mixed integer linear and nonlinear programming formu-
103
+ lations are among the most frequent exact approaches to model different classes of these prob-
104
+ lems (Jovanovi´c and Harker 1991, Huntley et al. 1995, Sherali and Suharko 1998, Lawley et al.
105
+
106
108
+ 2008, Haahr and Lusby 2017, Davarnia et al. 2019). Proposed solution techniques include but are
109
+ not limited to branch-and-bound methods (Jovanovi´c and Harker 1991, Fuchsberger and L¨uthi
110
+ 2007), branch-and-cut frameworks (Zwaneveld, Kroon, and Van Hoesel 2001, Ceselli et al. 2008),
111
+ branch-and-price approaches (Lusby 2008, Lin and Kwan 2016), graph coloring algorithms (Cor-
112
+ nelsen and Di Stefano 2007), and heuristics (Carey and Crawford 2007, Liu and Kozan 2011, I¸cy¨uz
113
+ et al. 2016). Rolling stock scheduling (Abbink et al. 2004, Alfieri et al. 2006, Haahr et al. 2016,
114
+ Bornd¨orfer et al. 2016) that assigns rolling stocks to a given timetable, and crew scheduling (Kwan
115
+ 2011, Shen et al. 2013, Heil, Hoffmann, and Buscher 2020) that covers train activities by assigning
116
+ crews to the associated operations are other major problems arising in the area of railroad planning.
117
+ Due to the inherent uncertainty in different types of train scheduling and routing problems, many
118
+ researchers have studied stochastic variants of the problems where the supply/demand is considered
119
+ to be uncertain. Jordan and Turnquist (1983) propose a model for railroad car distribution where
120
+ supply and demand of cars are uncertain. Jin et al. (2019) study a chance-constrained programming
121
+ model for the train stop planning problem under stochastic demand. Ying, Chow, and Chin (2020)
122
+ propose a deep reinforcement learning approach for train scheduling where the passenger demand
123
+ is uncertain. Recently, Gong et al. (2021) propose a stochastic optimization method to solve a train
124
+ timetabling problem with uncertain passenger demand. Also see works by Meng and Zhou (2011),
125
+ Quaglietta, Corman, and Goverde (2013), Larsen et al. (2014) that consider train dispatching
126
+ problems under stochastic environments.
127
+ In the context of unit train scheduling, Lawley et al. (2008) study a time-space network flow
128
+ model to schedule bulk railroad deliveries for unit trains. In their model, the authors consider char-
129
+ acteristics of underlying rail network, demands of customers, and capacities of tracks, stations, and
130
+ loading/unloading requirements. They propose a mixed integer programming (MIP) formulation
131
+ that maximizes the demand satisfaction while minimizing the waiting time at stations. Lin and
132
+ Kwan (2014) (cf. Lin and Kwan (2016)) propose a model for a train scheduling problem that is capa-
133
+ ble to capture locations where coupling/decoupling is forbidden. They develop a branch-and-price
134
+ algorithm inspired by column generation to solve the associated problem. Lin and Kwan (2018)
135
+ also propose a heuristic branch-and-bound approach to decrease coupling/decoupling redundancy.
136
+ I¸cy¨uz et al. (2016) study the problem of planning coal unit trains that includes train formation,
137
+ routing, and scheduling. As noted by the authors, their proposed MIP formulation fails to solve
138
+ the problem directly due to its large size. As a remedy, they develop a time-efficient heuristic that
139
+ produces good quality solutions. More recently, Davarnia et al. (2019) introduce and study the
140
+ GUFP with application to unit train scheduling. In particular, the authors show how to impose
141
+ NSNM restrictions in network optimization problems. They present a polyhedral study and pro-
142
+ pose a MIP formulation to model a stylized variant of the unit train scheduling problem. In the
143
+ present paper, we use their formulation (see section 3.1) as a basis for our solution framework.
144
+
145
147
+ The unsplittable flow problem (UFP) was first introduced by Kleinberg (1996) as a generalization
148
+ of the disjoint path problem. Given a network with capacities for arcs and a set of source-terminal
149
+ vertex pairs with associated demands and rewards, the objective in the UFP is to maximize the
150
+ total revenue by selecting a subset of source-terminal pairs and routing flows through a single
151
+ path for each of them to satisfy the associated demand. In the GUFP, however, there can exist
152
+ nodes that do not need to respect the NSNM requirement, and demands can be satisfied by
153
+ passing flows through multiple paths. It is well-known that different variants of UFP are NP-
154
+ hard (Baier, K¨ohler, and Skutella 2005, Kolman and Scheideler 2006, Chakrabarti et al. 2007). Since
155
+ its introduction, the UFP structure has been used in different areas of application, from bandwidth
156
+ allocation in heterogeneous networks (Kolman and Scheideler 2006), to survivable connection-
157
+ oriented networks (Walkowiak 2006), and virtual circuit routing problems (Hu, Lan, and Wan
158
+ 2009). Considering the hardness of the problem, approximation algorithms have been a common
159
+ technique to tackle different variants of the UFP in the literature (Baier, K¨ohler, and Skutella
160
+ 2005, Chakrabarti et al. 2007).
161
+ 1.2.
162
+ Literature Review on Decision Diagrams
163
+ DDs are directed acyclic graphs with a source and a terminal node where each source-terminal path
164
+ encodes a feasible solution to an optimization problem. In DDs, each layer from the source to the
165
+ terminal represents a decision variable where labels of arcs show their values. Had˘zi´c and Hooker
166
+ (2006) proposed to use DDs to model the feasible region of a discrete optimization problem and used
167
+ it for postoptimality analysis. Later, Andersen et al. (2007) presented relaxed DDs to circumvent
168
+ the exponential growth rate in the DD size when modeling large discrete optimization problems.
169
+ Bergman et al. (2016b) introduced a branch-and-bound algorithm that iteratively uses relaxed and
170
+ restricted DDs to find an optimal solution. The literature contains many successful utilizations of DDs
171
+ in different domains; see works by Bergman and Cire (2018), Serra and Hooker (2019), Davarnia
172
+ and Van Hoeve (2020), Gonzalez et al. (2020), and Hosseininasab and Van Hoeve (2021) for some
173
+ examples.
174
+ Until recently, applications of DDs were limited to discrete problems, and the question on how to
175
+ use DDs in solving optimization problems with continuous variables was unanswered. To address
176
+ this limitation, Davarnia (2021) proposed a technique called arc-reduction that generates a DD
177
+ that represents a relaxation of the underlying continuous problem. In a follow-up work, Salemi
178
+ and Davarnia (2022a) established necessary and sufficient conditions for a general MIP to be
179
+ representable by DDs. They showed that a bounded MIP can be remodeled and solved with DDs
180
+ through employing a specialized Benders decomposition technique. In this paper, we build on this
181
+ framework to design a novel DD-based methodology to solve the SGUFP.
182
+
183
185
+ 1.3.
186
+ Contributions
187
+ While there are several studies in the literature dedicated to the unit train problem, exact method-
188
+ ologies that provide a rigorous treatment of the NSNM requirement at the heart of unit train
189
+ models are scarce. In this paper, we design a novel exact DD-based framework to solve the SGUFP,
190
+ as a more realistic and more challenging variant of this problem class. To our knowledge, this is
191
+ the first work that studies SGUFP from an exact perspective, and the first application of DDs
192
+ to a transportation problem. Our proposed framework formulates the problem in a transformed
193
+ space of variables, which has a smaller dimension compared to the standard MIP formulations
194
+ of the SGUFP. This presentation mitigates the computational difficulties stemming from the MIP
195
+ formulation size, providing a viable solution approach for large-scale network problems. The core
196
+ principles of our DD framework can also be used to model other transportation problems with
197
+ similar structure, as an alternative to traditional network optimization techniques.
198
+ The remainder of this paper is organized as follows. In Section 2 we provide basic definitions
199
+ and a brief overview on discrete and continuous DD models, including the DD-BD method to
200
+ solve bounded MIPs. In Section 3, we adapt the DD-BD method to solve the SGUFP. We propose
201
+ algorithms to construct exact and relaxed DDs to solve the problem in a transformed space.
202
+ Section 4 presents computational experiments to evaluate the performance of the DD-BD method
203
+ for the SGUFP. We give concluding remarks in Section 5.
204
+ 2.
205
+ Background on DDs
206
+ In this section, we present basic definitions and results relevant to our DD analysis.
207
+ 2.1.
208
+ Overview
209
+ A DD D = (U,A,l) with node set U, arc set A, and arc label mapping l : A → R is a directed acyclic
210
+ graph with n ∈ N arc layers A1,A2,...,An, and n + 1 node layers U1,U2,...,Un+1. The node layers
211
+ U1 and Un+1, with |U1| = |Un+1| = 1, contain the root r and the terminal t, respectively. In any arc
212
+ layer j ∈ [n] := {1,2,...,n}, an arc (u,v) ∈ Aj is directed from the tail node u ∈ Uj to the head node
213
+ v ∈ Uj+1. The width of D is defined as the size of its largest Uj. DDs can model a bounded integer
214
+ set P ⊆ Zn in such a way that each r-t arc-sequence (path) of the form (a1,...,an) ∈ A1 × ... × An
215
+ encodes a point y ∈ P where l(aj) = yj for j ∈ [n], that is y is an n-dimensional point in P whose
216
+ j-th coordinate is equal to the label value l(aj) of the arc aj. For such a DD, we have P = Sol(D),
217
+ where Sol(D) denotes the finite collection of all r-t paths.
218
+ The graphical property of DDs can be exploited to optimize an objective function over a discrete
219
+ set P. To this end, DD arcs are weighted in such a way that the cumulative weight of an r-t
220
+ path that encodes a solution y ∈ P equals the objective function value evaluated at y. Then, a
224
+ shortest (resp. longest) r-t path for the underlying minimization (resp. maximization) problem is
225
+ found, an operation that can be performed in polynomial time.
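+ For concreteness, a weighted DD can be stored as a list of arc layers and its longest r-t path found with a single forward pass over the layers; the following is a minimal sketch of that computation (ours, not the authors' implementation):
```python
def dd_longest_path(layers):
    """layers[j] is the list of arcs (tail, head, label, weight) in arc layer j+1.
    Node ids are local to each node layer; the root and the terminal both get id 0.
    Returns the longest r-t path as (total weight, list of arc labels)."""
    best = {0: (0.0, [])}                 # best partial path reaching each node of the current layer
    for arcs in layers:
        nxt = {}
        for tail, head, label, weight in arcs:
            if tail not in best:
                continue
            value, labels = best[tail]
            cand = (value + weight, labels + [label])
            if head not in nxt or cand[0] > nxt[head][0]:
                nxt[head] = cand
        best = nxt
    return best[0]                        # the single terminal node
```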
226
+ The construction of an exact DD as described above is computationally prohibitive due to the
227
+ exponential growth rate of its size. To alleviate this difficulty, relaxed and restricted DDs are
228
+ proposed to keep the size of DDs under control. In a relaxed DD, nodes are merged in such a way
229
+ that the width of the resulting diagram is bounded by a predetermined width limit. This node-
230
+ merging process ensures that all feasible solutions of the original set are encoded by a subset of
231
+ all r-t paths in the resulting DD. Optimization over this relaxed DD provides a dual bound to the
232
+ optimal solution of the original problems. In a restricted DD, the collection of all r-t paths of the
233
+ DD encode a subset of the feasible solutions of the original set. Optimization over this restricted
234
+ DD provides a primal bound to the optimal solution of the original problems. The restricted and
235
+ relaxed DDs can be iteratively refined in a branch-and-bound scheme to find the optimal value of
236
+ a problem through convergence of their primal and dual bounds. The following example illustrates
237
+ an exact, relaxed and restricted DD for a discrete optimization problem.
238
+ Example 1. Consider the discrete optimization problem max{5y1 + 10y2 + 4y3 | y ∈ P} where
239
+ P = {(1,0,0),(1,0,1),(0,1,0),(0,0,1),(0,0,0)}. The exact DD D with width 3 in Figure 2(a) models
240
+ the feasible region P. The weight of each arc a ∈ Aj, for j ∈ {1,2,3}, shows the contribution of
241
+ variable yj’s value assignment to the objective function. The longest r-t path that encodes the
242
+ optimal solution (y∗1, y∗2, y∗3) = (0,1,0) has length 10, which is the optimal value of the problem.
246
+ By reducing the width limit to 2, we can build relaxed and restricted DDs for P as follows. The
247
+ relaxed DD D in Figure 2(b) provides an upper bound to the optimal solution, where the longest
248
+ path with length 14 is obtained by an infeasible point (y1,y2,y3) = (0,1,1). Finally, the restricted
249
+ DD D in Figure 2(c) gives a lower bound to the optimal solution, where the longest path with
250
+ length 9 encodes a feasible solution (y1,y2,y3) = (1,0,1).
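+ Feeding one possible width-3 exact DD for the set P of Example 1 (our own arc encoding, not necessarily the exact layout of Figure 2(a)) to the longest-path sketch above reproduces the optimum:
```python
# arcs are (tail, head, label, weight); weights follow 5*y1 + 10*y2 + 4*y3
example1_layers = [
    [(0, 0, 1, 5), (0, 1, 0, 0)],                                           # choose y1
    [(0, 0, 0, 0), (1, 1, 1, 10), (1, 2, 0, 0)],                            # choose y2
    [(0, 0, 1, 4), (0, 0, 0, 0), (1, 0, 0, 0), (2, 0, 1, 4), (2, 0, 0, 0)], # choose y3
]
print(dd_longest_path(example1_layers))  # (10.0, [0, 1, 0]) as in Example 1
```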
251
+ 2.2.
252
+ Continuous DD Models
253
+ While the framework described in the previous section can be applied to solve different classes of
254
+ discrete optimization problems, its extension to model sets with continuous variables requires a
255
+ fundamentally different approach. The reason that the traditional DD structure is not viable for
256
+ continuous sets is that representing the domain of a continuous variable through arcs requires an
257
+ infinite number of them, spanning all values within a continuous interval, which is structurally
258
+ prohibitive in DD graphs. Fortunately, there is a way to overcome this obstacle by decomposing
259
+ the underlying set into certain rectangular formations, which can in turn be represented through
260
+ node-sequences in DDs. In what follows, we give an overview of these results as relevant to our
261
+ analysis.
262
+
263
+ [Figure 2 panels: (a) Exact DD D, (b) Relaxed DD D, (c) Restricted DD D]
304
+ Figure 2
305
+ The exact, relaxed, and restricted DDs representing P in Example 1. Solid and dotted arcs indicate
306
+ one and zero arc labels, respectively. Numbers next to arcs represent weights.
307
+ Consider a bounded set P ⊆ Rn. Salemi and Davarnia (2022a) give necessary and sufficient
308
+ conditions for P to admit the desired rectangular decomposition. Such a set is said to be DD-
309
+ representable w.r.t. a fixed index set I ⊆ [n], as there exists a DD D such that max{f(x) | x ∈
310
+ P} = max{f(x) | x ∈ Sol(D)} for every function f(x) that is convex in the space of variables xI.
311
+ A special case of DD-representable sets is given next.
312
+ Proposition 1. Any bounded mixed integer set of the form P ⊆ Zn × R is DD-representable
313
+ w.r.t. I = {n + 1}.
314
+
315
+ This result gives rise to a novel DD-based framework to solve general bounded MIPs as outlined
316
+ below. Consider a bounded MIP H := max{cy +dx | Ay +Gx ≤ b, y ∈ Zn}. Using Benders decom-
317
+ position (BD), formulation H is equivalent to max_{y∈Zn} {cy + max_x {dx | Gx ≤ b − Ay}}, which can
+ be reformulated as M = max{cy + z | (y;z) ∈ Zn × [l,u]}, where l,u ∈ R are some valid bounds on z
319
+ induced from the boundedness of H. Here, M is the master problem and z represents the objective
320
+ value of the subproblem maxx{dx | Gx ≤ b − A¯y} for any given ¯y as an optimal solution of the
321
+ master problem. The outcome of the subproblems is either an optimality cut or a feasibility cut
322
+ that will be added to the master problem. Then, the master problem will be resolved. Proposition 1
323
+ implies that formulation M can be directly modeled and solved with DDs. For this DD, we assign n
324
+ arc layers to the integer variables y1,y2,...,yn, and one arc layer to the continuous variable z with
325
+ only two arc labels showing a lower and upper bound for this variable. To find an optimal solution,
326
+ the longest path is calculated, which will be used to solve the subproblems. Note that since M is
327
+ a maximization problem, a longest path of the associated DD encodes an optimal solution, and
328
+ its length gives the optimal value; see Example 2. The feasibility and optimality cuts generated
329
+ by the subproblems will then be added to refine the DD, whose longest path will be recalculated.
330
+
331
+ Salemi and Davarnia: Solving Unsplittable Network Flow Problems with Decision Diagrams
332
+ 9
333
+ The refinement technique consists of removing arcs of the DD that lead to solutions that violate
334
+ the added inequality, as well as splitting nodes of the DD that lead to different subsequent partial
335
+ assignments; see Bergman et al. (2016a) for a detailed account on DD refinement techniques. We
336
+ illustrate this approach in Example 2.
337
+ Example 2. Suppose that max{2y1 + 4y2 + z | y ∈ P,z ≤ 25} forms the master problem at the
338
+ penultimate iteration of a BD algorithm, where P = {(0,0),(1,1)}. This problem is represented
339
+ by the DD D in Figure 3(a) where −M is a valid lower bound for z. The longest path of D
340
+ encodes the solution (ˆy1, ˆy2, ˆz) = (1,1,25). Assume that using the point (ˆy1, ˆy2) = (1,1) in the
341
+ associated subproblem generates an optimality cut z ≤ 3y1 + 2y2 + 10 for the final iteration of the
342
+ BD algorithm. Refining DD D with respect to this cut yields the new DD in Figure 3(b). The
343
+ longest path represents the optimal solution (y∗
344
+ 1,y∗
345
+ 2,z∗) = (1,1,15) with length 21, which is the
346
+ optimal value.
347
[Figure 3 about here: the master-problem DDs of Example 2, with arc layers for y1, y2, z; panel (a) shows the penultimate iteration (z-arcs labeled −M and 25) and panel (b) the final iteration after refinement (z-arcs labeled −M, 10, and 15). Caption: The last two iterations of solving the master problem in Example 2.]
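The refinement of Example 2 can be mimicked in a few lines. The sketch below is illustrative only (the paper refines the DD graph directly rather than enumerating its paths); it evaluates the two r-t path families of the master DD, first with the bound z ≤ 25 and then after imposing the optimality cut z ≤ 3y1 + 2y2 + 10. The helper name and data layout are our own.

P = [(0, 0), (1, 1)]                          # (y1, y2) points encoded by the DD of Example 2

def longest(points, z_cap):
    # z_cap(y1, y2) gives the largest z-arc label that survives refinement for that prefix
    return max(((2*y1 + 4*y2 + z_cap(y1, y2), (y1, y2, z_cap(y1, y2)))
                for (y1, y2) in points), key=lambda t: t[0])

# Penultimate iteration: only z <= 25 is imposed.
print(longest(P, lambda y1, y2: 25.0))                          # (31.0, (1, 1, 25.0))
# Final iteration: refine with the optimality cut z <= 3*y1 + 2*y2 + 10.
print(longest(P, lambda y1, y2: min(25.0, 3*y1 + 2*y2 + 10)))   # (21.0, (1, 1, 15.0))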
374
+ Using the DD framework as outlined above can be computationally challenging due to exponen-
375
+ tial growth rate of the size of an exact DD. To mitigate this difficulty, restricted/relaxed DDs can
376
+ be employed inside of the BD framework as demonstrated in Algorithm 1. We refer to this solution
377
+ method as DD-BD (Salemi and Davarnia 2022a).
378
+ In explaining the steps of Algorithm 1, let point ˆy ∈ Zk, where k ≤ n, be a partial value assignment
379
+ to the first k coordinates of variable y, i.e., yi = ˆyi for all i ∈ [k]. We record the set of all partial
380
+ value assignments in ˆY = {ˆy ∈ Zk | k ∈ [n]}∪{⊖}, where ⊖ represents the case where no coordinate
381
+ of y is fixed. Set C contains the produced Benders cuts throughout the algorithm, and we denote
382
+ the feasible region described by these cuts by F C. Further, define MC(ˆy) = max{cy + z | (y;z) ∈
383
+ Zn × [l,u] ∩ F C, yi = ˆyi,∀i ∈ [k]} to be the restricted master problem M obtained through adding
384
+ cuts in C and fixing the partial assignment ˆy. In this definition, the case with C = ∅ and ˆY = {⊖}
385
+ is denoted by M∅(⊖) = M, which is an input to Algorithm 1.
386
+
387
+ Salemi and Davarnia: Solving Unsplittable Network Flow Problems with Decision Diagrams
388
+ 10
389
+ Algorithm 1: DD-BD
390
+ Data: MIP H, construction method to build restricted and relaxed DDs for M
391
+ Result: An optimal solution (y∗,z∗) and optimal value w∗ to H
392
+ 1 initialize set of partial assignments ˆY = {⊖}, set of Benders cuts C = ∅, and w∗ = −∞
393
+ 2 if ˆY = ∅ then
394
+ 3
395
+ terminate and return (y∗,z∗) and w∗
396
+ 4 else
397
+ 5
398
+ select ˆy ∈ ˆY and update ˆY ← ˆY \ {ˆy}
399
+ 6
400
+ create a restricted DD D associated with MC(ˆy)
401
+ 7
402
+ if D ̸= ∅ then
403
+ 8
404
+ find a longest r-t path of D with encoding point (y,z) and length w
405
+ 9
406
+ solve the BD subproblem using y to obtain Benders cut C
407
+ 10
408
+ if C ∈ C then
409
+ 11
410
+ go to line 17
411
+ 12
412
+ else
413
+ 13
414
+ update C ← C ∪ C and refine D w.r.t. C
415
+ 14
416
+ go to line 8
417
+ 15
418
+ else
419
+ 16
420
+ go to line 2
421
+ 17
422
+ if w > w∗ then
423
+ 18
424
+ update w∗ ← w and (y∗,z∗) ← (y,z)
425
+ 19
426
+ if D provides an exact representation of MC(ˆy) then
427
+ 20
428
+ go to line 2
429
+ 21
430
+ else
431
+ 22
432
+ create a relaxed DD D associated with MC(ˆy)
433
+ 23
434
+ find a longest r-t path of D with length w
435
+ 24
436
+ if w > w∗ then
437
+ 25
438
+ solve the BD subproblem using y to obtain Benders cut C
439
+ 26
440
+ if C ∈ C then
441
+ 27
442
+ go to line 31
443
+ 28
444
+ else
445
+ 29
446
+ update C ← C ∪ C and refine D w.r.t. C
447
+ 30
448
+ go to line 23
449
+ 31
450
+ forall u in the last exact layer of D do
451
+ 32
452
+ update ˆY ← ˆY ∪ {˜y} where ˜y encodes longest r-u path of D
453
+ 33
454
+ go to line 2
455
+
456
458
+ The algorithm starts with constructing a restricted DD D corresponding to MC(ˆy) with empty
459
+ initial values for C and ˆy. We then find a longest r-t path of D encoding solution (y,z). Next,
460
+ using y, we solve the associated subproblem to obtain a feasibility/optimality cut C. We add this
461
+ cut to C, refine D according to it, and find a new longest r-t path. We repeat these steps until no
462
+ new feasibility/optimality cut is generated. At this point, the length of a longest r-t path of D,
463
+ denoted by w, gives a lower bound to the master problem M, which is also a valid lower bound
464
+ to the original problem H. The value of w can be used to update w∗, the optimal value of H
465
+ at termination. Next, we create a relaxed DD D corresponding to MC(ˆy). We find a longest r-t
466
+ path of D that provides an upper bound w to M. If the upper bound w is strictly greater than
467
+ the current value of w∗, we follow steps similarly to the case for D to iteratively refine D w.r.t.
468
+ feasibility/optimality cuts through solving the subproblems, until no new cut is generated. Next,
469
+ we perform a specialized branch-and-bound procedure to improve the bound through expanding
470
+ merged layers of the DD. To this end, we add all the partial assignments associated with nodes in
471
+ the last exact layer of D (the last node layer in which no nodes are merged) to the collection ˆY.
472
+ The nodes corresponding to partial assignments in ˆY are required to be further explored to check
473
+ whether or not the value of w∗ can be improved. That is, the above process is repeated for every
474
+ node v with partial assignment in ˆY as the r-v path is fixed in the new restricted/relaxed DDs.
475
+ The algorithm terminates when ˆY becomes empty, at which point w∗ is the optimal value.
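The control flow of Algorithm 1 can be summarized by the following structural sketch (ours, not the authors' implementation). All DD- and problem-specific routines are passed in as callables and left unspecified; D.longest_path() is assumed to return the path length together with its encoded point, and solve_subproblem() to return a new Benders cut or None when the current point is already optimal for the subproblems.

def dd_bd(build_restricted, build_relaxed, solve_subproblem, refine, branch_points):
    # Y_hat collects partial assignments to explore; None plays the role of the empty assignment.
    Y_hat, cuts, best = [None], set(), (float('-inf'), None)
    while Y_hat:
        y_fix = Y_hat.pop()
        D = build_restricted(y_fix, cuts)                 # restricted DD for M_C(y_fix)
        if D is None:                                     # infeasible subtree
            continue
        while True:                                       # refine until no new cut is produced
            w, point = D.longest_path()
            cut = solve_subproblem(point)
            if cut is None or cut in cuts:
                break
            cuts.add(cut)
            refine(D, cut)
        if w > best[0]:                                   # update incumbent
            best = (w, point)
        if D.is_exact():                                  # this subtree is solved exactly
            continue
        R = build_relaxed(y_fix, cuts)                    # relaxed DD for M_C(y_fix)
        while True:
            w_bar, point = R.longest_path()
            if w_bar <= best[0]:                          # relaxation bound cannot improve: prune
                break
            cut = solve_subproblem(point)
            if cut is None or cut in cuts:                # branch on the last exact layer
                Y_hat.extend(branch_points(R))
                break
            cuts.add(cut)
            refine(R, cut)
    return best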
476
+ 3.
477
+ DD-BD Formulation for the SGUFP
478
+ In this section, we adapt the DD-BD framework described in Section 2.2 to solve the SGUFP.
479
+ 3.1.
480
+ MIP Formulation
481
+ We study the MIP formulation of the SGUFP based on that of its deterministic counterpart given
482
+ in Davarnia et al. (2019). Consider a network G = (V,A) with node set V := V ′ ∪ {s,t} and arc set
483
+ A, where s and t are source and sink nodes, respectively. The source node is connected to all the
484
+ supply nodes in S ⊆ V ′, and the sink node is connected to all the demand nodes in D ⊆ V ′. Figure 4
485
+ illustrates the general structure of this network. For a node q ∈ V , let δ−(q) := {i ∈ V | (i,q) ∈ A}
486
+ and δ+(q) := {j ∈ V | (q,j) ∈ A} show the set of incoming and outgoing neighbors of q, respectively.
487
+ Define ¯V ⊆ V ′ as a subset of vertices that must satisfy the NSNM requirement. For each node
488
+ q ∈ ¯V , let binary variable yq
489
+ ij ∈ {0,1} represent whether or not the flow entering node q ∈ ¯V through
490
+ arc (i,q) leaves node q through arc (q,j). The first stage of SGUFP determines the matching pairs
491
+ between incoming and outgoing arcs of unsplittable nodes as follows:
492
+ max
493
+ z
494
+ (1a)
495
+
496
+ Salemi and Davarnia: Solving Unsplittable Network Flow Problems with Decision Diagrams
497
+ 12
498
+ S
499
+ D
500
+ s
501
+ t
502
+ Figure 4
503
+ Illustration of network G = (V ′ ∪ {s,t},A)
504
+ s.t.
505
+
506
+ j∈δ+(q)
507
+ yq
508
+ ij ≤ 1
509
+ ∀i ∈ δ−(q), ∀q ∈ ¯V
510
+ (1b)
511
+
512
+ i∈δ−(q)
513
+ yq
514
+ ij ≤ 1
515
+ ∀j ∈ δ+(q), ∀q ∈ ¯V
516
+ (1c)
517
+ yq
518
+ ij ∈ {0,1}
519
+ ∀(i,j) ∈ δ−(q) × δ+(q), ∀q ∈ ¯V,
520
+ (1d)
521
+ where constraints (1b) ensure that each incoming arc to a node with NSNM requirement is
522
+ assigned to at most one outgoing arc, and constraints (1c) guarantee that each outgoing arc from
523
+ such a node is matched with at most one incoming arc.
524
+ In (1a)–(1d), variable z represents the objective value of the second stage of SGUFP where
525
+ the demand uncertainty is taken into account. This demand uncertainty is modeled by a set Ξ of
526
+ scenarios for the demand vector dξ with occurrence probability Prξ for each scenario ξ ∈ Ξ. Let
527
+ continuous variable xξ
528
+ ij ∈ R+ denote the flow from node i to node j through arc (i,j) under scenario
529
+ ξ ∈ Ξ. We further assign a reward rij per unit flow to be collected by routing flow through arc (i,j).
530
+ It follows that z = �
531
+ ξ∈Ξ Prξzξ, where zξ is the objective value of the second stage of SGUFP for
532
+ each scenario ξ ∈ Ξ. This subproblem is formulated as follows for a given y vector:
533
max   Σ_{q∈V} Σ_{j∈δ+(q)} r_{qj} x^ξ_{qj}                                                     (2a)
s.t.  Σ_{i∈δ−(q)} x^ξ_{iq} − Σ_{j∈δ+(q)} x^ξ_{qj} = 0        ∀q ∈ V ′                        (2b)
      ℓ^ξ_{iq} ≤ x^ξ_{iq} ≤ u^ξ_{iq}                         ∀i ∈ δ−(q), ∀q ∈ V              (2c)
      x^ξ_{iq} − x^ξ_{qj} ≤ u^ξ_{iq}(1 − y^q_{ij})           ∀(i,j) ∈ δ−(q) × δ+(q), ∀q ∈ ¯V (2d)
      x^ξ_{qj} − x^ξ_{iq} ≤ u^ξ_{qj}(1 − y^q_{ij})           ∀(i,j) ∈ δ−(q) × δ+(q), ∀q ∈ ¯V (2e)
      x^ξ_{iq} ≤ u^ξ_{iq} Σ_{j∈δ+(q)} y^q_{ij}               ∀i ∈ δ−(q), ∀q ∈ ¯V             (2f)
      x^ξ_{qj} ≤ u^ξ_{qj} Σ_{i∈δ−(q)} y^q_{ij}               ∀j ∈ δ+(q), ∀q ∈ ¯V             (2g)
      x^ξ_{ij} ≥ 0                                           ∀(i,j) ∈ A.                     (2h)
597
+ In the above formulation, the objective function captures the total reward collected by routing
598
+ flows throughout the network (from the source s to the sink t) to satisfy demands. The flow-balance
599
+ requirements are represented by (2b). Constraints (2c) bound the flow on each arc from below
600
and above. To impose the demand requirement for each scenario ξ ∈ Ξ, we fix ℓ^ξ_{qt} = u^ξ_{qt} = d^ξ_q for all
demand nodes q ∈ D with demand d^ξ_q, and leave the lower and upper bound values unchanged for all
606
+ other arcs. Constraints (2d)–(2g) model the NSNM requirement for each node q ∈ ¯V . In particular,
607
+ (2d) and (2e) ensure that matching arcs (i,q) and (q,j) have equal flows. Constraints (2f) and (2g)
608
+ guarantee that an arc without a matching pair does not carry any flow. We note here that the
609
+ Constraint (2b) is implied by other constraints of the above subproblem under the assumption
610
+ that y is feasible to the master problem (1a)–(1d). However, we maintain this constraint in the
611
+ subproblem because the master formulation in our DD-based approach, as will be described in
612
+ Section 3.2, may produce a solution that is not feasible to (1a)–(1d). As a result, the addition of
613
+ the Constraint (2b) will lead to a tighter subproblem formulation.
614
+ As discussed in Section 2.2, the first step to use the DD-BD algorithm is to decompose the
615
+ underlying problem into a master and a subproblem. The above two-stage formulation of the
616
+ SGUFP is readily amenable to BD since the first stage problem (1a)-(1d) can be considered as the
617
+ master problem together with some valid lower and upper bounds −Γ and Γ on z induced from the
618
+ boundedness of the MIP formulation. For a given y value obtained from the master problem and
619
+ a scenario ξ ∈ Ξ, the second stage problem (2a)-(2h) can be viewed as the desired subproblems.
620
+ The optimality/feasibility cuts obtained from each scenario-based subproblem are then added to
621
+ the master problem through aggregation as described in Section 3.3.
622
+ 3.2.
623
+ DD-BD: Master Problem Formulation
624
+ While the DD-BD Algorithm 1 provides a general solution framework for any bounded MIP, its DD
625
+ component is problem-specific, i.e., it should be carefully designed based on the specific structure
626
+ of the underlying problem. In this section, we design such an oracle for the SGUFP that represents
627
+ the feasible region {(1b)−(1d),z ∈ [−Γ,Γ]} of the master problem (1a)-(1d). To model this feasible
628
region in the original space of (y;z) variables, a DD would require Σ_{q∈ ¯V} |δ−(q)| × |δ+(q)| arc
630
+ layers to represent binary variables y and one arc layer to encode the continuous variable z.
631
+ Constructing such a DD, however, would be computationally cumbersome due to the large number
632
+ of the arc layers. To mitigate this difficulty, we take advantage of the structural flexibility of DDs
633
+ in representing irregular variable types that cannot be used in standard MIP models. One such
634
+
635
637
+ variable type is the index set, where arc layers represent indices, rather than domain values. We
638
+ next show that we can remarkably reduce the number of DD arc layers by reformulating the master
639
+ problem in a transformed space of variables defined over index sets.
640
+ Consider a node q ∈ ¯V . In the following, we define mappings that assign an index to each incoming
641
+ and outgoing arc of q. These mappings enable us to define new variables to reduce the number
642
+ of DD arc layers. Let ind−(i,q) be a one-to-one mapping from incoming arcs (i,q), for i ∈ δ−(q),
643
+ to the index set {1,2,...,|δ−(q)|}. Similarly, let ind+(q,j) be a one-to-one mapping from outgoing
644
+ arcs (q,j), for j ∈ δ+(q), to the index set {1,2,...,|δ+(q)|}. For each incoming arc (i,q) with index
645
h = ind−(i,q), we define an integer variable w^q_h ∈ {0,1,...,|δ+(q)|} such that w^q_h = 0 if this incoming
arc is not paired with any outgoing arc, and w^q_h = k > 0 if this arc is matched with the outgoing arc
(q,j) with index k = ind+(q,j).
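As an illustration (with arc names of our own choosing), the index mappings and the resulting w-variables for a node q with three incoming and two outgoing arcs could be set up as follows.

incoming = [('a', 'q'), ('b', 'q'), ('c', 'q')]   # arcs (i, q) for i in delta^-(q)
outgoing = [('q', 'd'), ('q', 'e')]               # arcs (q, j) for j in delta^+(q)

ind_minus = {arc: h for h, arc in enumerate(incoming, start=1)}   # one-to-one onto {1, 2, 3}
ind_plus = {arc: k for k, arc in enumerate(outgoing, start=1)}    # one-to-one onto {1, 2}

# w[h] = 0: the incoming arc with index h is unmatched;
# w[h] = k > 0: it is matched with the outgoing arc of index k.
w = {ind_minus[('a', 'q')]: ind_plus[('q', 'e')],   # (a,q) paired with (q,e)
     ind_minus[('b', 'q')]: 0,                      # (b,q) unmatched
     ind_minus[('c', 'q')]: ind_plus[('q', 'd')]}   # (c,q) paired with (q,d)
print(w)                                            # {1: 2, 2: 0, 3: 1}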
651
+ Next, we give a formulation in the space of w variables that describes the matching between
652
+ incoming and outgoing arcs of q for all q ∈ ¯V . In the following, sign(.) represents the sign function
653
+ that returns 1 if its argument is strictly positive, 0 if the argument is zero, and −1 otherwise.
654
+ Further, the operator |.|, when applied on a set, represents the set size; and when applied on a real
655
+ number, it represents the absolute value.
656
Proposition 2. Formulation
Σ_{i∈δ−(q)} sign(|w^q_{ind−(i,q)} − ind+(q,j)|) ≥ |δ−(q)| − 1        ∀j ∈ δ+(q), ∀q ∈ ¯V      (3a)
w^q_{ind−(i,q)} ∈ {0, 1, ..., |δ+(q)|}                               ∀i ∈ δ−(q), ∀q ∈ ¯V      (3b)
677
+ models the matching between incoming and outgoing arcs of nodes q ∈ ¯V .
678
+ Proof.
679
+ We show the result for a single node q ∈ ¯V . The extension to the multiple node case
680
+ is straightforward as the matching problem for each node is independent from other nodes. For
681
+ the direct implication, assume that M q is a matching between incoming and outgoing arcs of q,
682
+ with elements of the form (i,j) that represent a matching between the incoming arc (i,q) and
683
+ the outgoing arc (q,j). We show that variables w associated with matching pairs in M q satisfy
684
+ constraints (3a) and (3b). It follows from the definition of w that, for each (i,j) ∈ M q, we have
685
w^q_{ind−(i,q)} = ind+(q,j). Also, for any i ∈ δ−(q) that does not have a matching pair in M^q, we have
w^q_{ind−(i,q)} = 0. These value assignments show that w satisfies (3b), as the image of the ind+ mapping is
{1,...,|δ+(q)|}. For each i ∈ δ−(q) and j ∈ δ+(q), we have |w^q_{ind−(i,q)} − ind+(q,j)| ≥ 0, with equality
holding when (i,j) ∈ M^q. For each j ∈ δ+(q), there are two cases. For the first case, assume that
(i,j) ∉ M^q for any i ∈ δ−(q). As a result, |w^q_{ind−(i,q)} − ind+(q,j)| > 0 for all i ∈ δ−(q). Applying
the sign(.) function to these terms yields sign(|w^q_{ind−(i,q)} − ind+(q,j)|) = 1, which implies that
Σ_{i∈δ−(q)} sign(|w^q_{ind−(i,q)} − ind+(q,j)|) = |δ−(q)|, satisfying (3a). For the second case, assume that
(i*,j) ∈ M^q for some i* ∈ δ−(q). As a result, we have Σ_{i∈δ−(q)} sign(|w^q_{ind−(i,q)} − ind+(q,j)|) =
|δ−(q)| − 1, since sign(|w^q_{ind−(i*,q)} − ind+(q,j)|) = |w^q_{ind−(i*,q)} − ind+(q,j)| = 0, satisfying (3a).
730
+ For the reverse implication, assume that w is a feasible solution to (3a)–(3b). We show that the
731
+ pairs of the form (i,j) encoded by these variables constitute a feasible matching between incoming
732
+ and outgoing arcs of q, i.e., (i) each arc (i,q) is matched with at most one arc (q,j), and (ii) each
733
arc (q,j) is matched with at most one arc (i,q). It follows from constraint (3b) that, for each i ∈
δ−(q), variable w^q_{ind−(i,q)} takes a value in {0,1,...,|δ+(q)|}. If w^q_{ind−(i,q)} = 0, then (i,q) is not
matched with any outgoing arc; otherwise, it is matched with the arc (q,j) satisfying ind+(q,j) = w^q_{ind−(i,q)}.
This ensures that condition (i) above is satisfied for this matching collection. Further, for each
j ∈ δ+(q), constraint (3a) implies that sign(|w^q_{ind−(i,q)} − ind+(q,j)|) can be equal to zero for at
most one i ∈ δ−(q). In such a case, we would have at most one matching pair of the form (i,j) in
the collection, showing that condition (ii) above is satisfied.
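A direct way to check a candidate vector w against (3a)-(3b) for one node, assuming the dictionary encoding sketched earlier, is shown below; the function names are ours.

def sign(x):
    return (x > 0) - (x < 0)

def satisfies_3a_3b(w, n_out):
    n_in = len(w)
    if any(not (0 <= v <= n_out) for v in w.values()):          # constraint (3b)
        return False
    for k in range(1, n_out + 1):                               # constraint (3a): each outgoing
        if sum(sign(abs(w[h] - k)) for h in w) < n_in - 1:      # index k used by at most one w[h]
            return False
    return True

print(satisfies_3a_3b({1: 2, 2: 0, 3: 1}, n_out=2))   # True: a valid matching
print(satisfies_3a_3b({1: 2, 2: 2, 3: 1}, n_out=2))   # False: outgoing index 2 used twice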
748
+
749
+ It follows from Proposition 2 that constraints (3a)-(3b) can replace (1b)-(1d) in the master
750
+ problem (1a)-(1d) to obtain the following master problem in a transformed space of variables.
751
max_{w;z} { z | (3a)−(3b), z ∈ [−Γ,Γ] }.                                      (4)
754
+ Note that formulation (4) is an integer nonlinear program (INLP) with nonconvex and non-
755
+ continuous constraint functions. Such a formulation is extremely difficult for conventional MINLP
756
+ techniques and solvers to handle. However, due to structural flexibility of DDs in representing inte-
757
+ ger nonlinear programs, this problem can be easily modeled via a DD; see Davarnia and Van Hoeve
758
+ (2020) for a detailed account on using DDs for modeling INLPs. In the following, we present an
759
+ algorithm to construct DDs in the space of (w;z) variables for the master problem (4) with a
760
+ single node q ∈ ¯V . The extension to the case with multiple nodes follows by replicating the DD
761
+ structure. The output of Algorithm 2 is a DD with |δ−(q)| + 1 arc layers where the first |δ−(q)|
762
+ layers represent w variables and the last layer encodes variable z. In this algorithm, su denotes the
763
+ state value of DD node u. The core idea of the algorithm is to use unpaired outgoing arcs of q as
764
+ the state value at each DD layer that represents the matching for an incoming arc of q.
765
Next, we show that the solution set of the DD constructed by Algorithm 2 represents the feasible
766
+ region of (4). Note here that DD representation of a MIP set, as described in Section 2.2, does
767
+ not imply the encoding of all of the solutions of the set, but rather the encoding of a subset of all
768
+ solutions that subsumes all the extreme points of the set. Such a representation is sufficient to solve
769
+ an optimization problem over the set with an objective function convex in continuous variables,
770
+ which is the case for (4).
771
+
772
774
+ Algorithm 2: Construction of DD for the master problem of SGUFP with a node q ∈ ¯V
775
+ Data: node q ∈ ¯V , parameter Γ
776
+ Result: an exact DD D
777
+ 1 create the root node r ∈ U1 with state sr = {0,1,...,|δ+(q)|}
778
+ 2 forall i ∈ {1,2,...,|δ−(q)|} and u ∈ Ui do
779
+ 3
780
+ forall ℓ ∈ su do
781
+ 4
782
+ create a node v ∈ Ui+1 with state (su \ {ℓ}) ∪ {0} and an arc a ∈ Ai connecting u to v
783
+ with label l(a) = ℓ
784
+ 5 forall u ∈ U1+|δ−(q)| do
785
+ 6
786
+ create two arcs a1,a2 ∈ A1+|δ−(q)| connecting u to the terminal node with labels l(a1) = Γ
787
+ and l(a2) = −Γ.
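A compact rendering of Algorithm 2 in code is sketched below (the data structures are our own; the paper works with explicit node and arc layers). States are represented as frozensets of the outgoing indices that are still available, with index 0, meaning "unmatched", always kept available.

def build_exact_dd(n_in, n_out, gamma):
    root = frozenset(range(0, n_out + 1))            # {0, 1, ..., |delta^+(q)|}
    layers, current = [], {root}
    for _ in range(n_in):                            # one arc layer per incoming arc of q
        arcs, nxt = [], set()
        for state in current:
            for label in sorted(state):
                child = frozenset((state - {label}) | {0})   # label 0 can be reused
                arcs.append((state, child, label))
                nxt.add(child)
        layers.append(arcs)
        current = nxt
    # last arc layer: two arcs per node encoding the bounds -Gamma and +Gamma on z
    layers.append([(state, 't', z) for state in current for z in (-gamma, gamma)])
    return layers

dd = build_exact_dd(n_in=3, n_out=2, gamma=100)
print(len(dd))                                       # 4 arc layers: w_1, w_2, w_3 and z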
788
+ Theorem 1. Consider a SGUFP with ¯V = {q}. Let D be a DD constructed by Algorithm 2.
789
+ Then, Sol(D) represents the feasible region of (4).
790
+ Proof.
791
+ (⊆) Consider an r-t path of D that encodes solution ( ˜wq,z). According to Algorithm 2,
792
+ the labels of the first |δ−(q)| arcs of this path belong to {0,1,...,|δ+(q)|}, showing that ˜wq
793
+ satisfies constraints (3b). Assume by contradiction that ˜wq does not satisfy constraints (3a),
794
i.e., Σ_{i∈δ−(q)} sign(|˜w^q_{ind−(i,q)} − ind+(q,j)|) ≤ |δ−(q)| − 2 for some j ∈ δ+(q). This implies that
˜w^q_{ind−(i′,q)} = ˜w^q_{ind−(i′′,q)} = ind+(q,j) for two distinct i′, i′′ ∈ δ−(q). In other words, the arcs at lay-
804
+ ers ind−(i′,q) and ind−(i′′,q) of the selected r-t path both share the same label value ind+(q,j).
805
+ According to line 3 of Algorithm 2, we must have that the state value of nodes at layers ind−(i′,q)
806
+ and ind−(i′′,q) of the r-t path both contain ind+(q,j). This is a contradiction to the state update
807
+ policy in line 4 of Algorithm 2, since positive arc labels at each layer of the DD will be excluded
808
+ from the state value of the subsequent nodes.
809
+ (⊇) Consider a feasible solution point ( ˜wq; ˜z) of (4). Suppose ˜wq = (ℓ1,ℓ2,...,ℓ|δ−(q)|). According
810
+ to constraints (3a), no two coordinates of ˜wq have the same positive value. The state value at the
811
+ root node in D contains all index values {0,1,...,|δ+(q)|}. According to Algorithm 2, there exists
812
+ an arc with label ℓ1 at the first layer of D. The state value at the head node of this arc, therefore,
813
+ contains ℓ2 ∈ {0,1,...,|δ+(q)|} \ {ℓ1}, which guarantees an arc with label ℓ2 at the second layer of
814
+ this path. Following a similar approach, we can track a path from the root to layer |δ−(q)| whose
815
+ arcs labels match values of ˜wq. Note for the last layer that ˜z ∈ [−Γ,Γ], which is included in the
816
+ interval between arc labels of the last layer of D. As a result, ( ˜wq; ˜z) is represented by an r-t path
817
+ of D.
818
+
819
+ The main purpose of using a DD that models the master problem (4) over one that models (1a)-
820
+ (1d) is the size reduction in arc layers that represent variables w as compared with variables
821
+
822
824
+ y. It turns out that this space transformation can significantly improve the solution time of the
825
+ DD approach. We refer the interested reader to Appendix A for a detailed discussion on these
826
+ advantages, including preliminary computational results.
827
+ Constructing exact DDs as described in Algorithm 2 can be computationally expensive for large
828
+ size problems. As discussed in Section 2.2, relaxed and restricted DDs are used to circumvent this
829
+ difficulty. Building restricted DDs is straightforward as it involves the selection of a subset of r-t
830
+ paths of the exact DD that satisfy a preset width limit. Constructing relaxed DDs, on the other
831
+ hand, requires careful manipulation of the DD structure to merge nodes in such a way that it
832
+ encodes a superset of all r-t paths of the exact DD. We demonstrate a method to construct such
833
+ relaxed DDs in Algorithm 3. Similarly to Algorithm 2, this algorithm is presented for a single
834
+ NSNM node, but can be extended to multiple nodes by replicating the procedure.
835
+ Algorithm 3: Construction of relaxed DD for the master problem of SGUFP with a node
836
+ q ∈ ¯V
837
+ Data: node q ∈ ¯V , parameter Γ
838
+ Result: a relaxed DD D
839
+ 1 create the root node r ∈ U1 with state sr = {0,1,...,|δ+(q)|}
840
+ 2 forall i ∈ {1,2,...,|δ−(q)|} and u ∈ Ui do
841
+ 3
842
+ forall ℓ ∈ su do
843
+ 4
844
+ create a node v ∈ Ui+1 with state (su \ {ℓ}) ∪ {0} and an arc a ∈ Ai connecting u to v
845
+ with label l(a) = ℓ
846
+ 5
847
+ select a subset of nodes v1,v2,...,vk ∈ Ui+1 and merge them into node v′ with state
848
+ sv′ = �k
849
+ j=1 svj
850
+ 6 forall u ∈ U1+|δ−(q)| do
851
+ 7
852
+ create two arcs a1,a2 ∈ A1+|δ−(q)| connecting u to the terminal node with labels l(a1) = Γ
853
+ and l(a2) = −Γ.
854
+ Theorem 2. Consider a SGUFP with ¯V = {q}. Let D be a DD constructed by Algorithm 3.
855
+ Then, D represents a relaxation of the feasible region of (4).
856
+ Proof.
857
+ Let ˙D be the DD constructed by Algorithm 2 for the master problem (4) with a single
858
+ node q ∈ ¯V . It suffices to show that the solution set of D provides a relaxation for that of ˙D. Pick
859
+ a root-terminal path ˙P of ˙D with encoding point ( ˙wq; ˙z). We show that there exist a root-terminal
860
+ path P of D with encoding point (wq;z) such that wq = ˙wq and z = ˙z. Given a DD, define Pk to
861
+ be a sub-path composed of arcs in the first k layers, for 1 ≤ k ≤ |δ−(q)|. We show for any sub-path
862
+ ˙Pk of ˙D with encoding point ˙wq
863
+ k = ( ˙wq
864
+ 1,..., ˙wq
865
+ k), there exists a sub-path P k of D with encoding
866
+
867
+ Salemi and Davarnia: Solving Unsplittable Network Flow Problems with Decision Diagrams
868
+ 18
869
+ point wk = (w1,...,wk) such that wh = ˙wh for h = 1,...,k. Note that we only need to prove the
870
+ matching values for k ≤ |δ−(q)|, because each node at node layer |δ−(q)| + 1 of both ˙D and D
871
+ is connected by two arcs with labels −Γ and Γ to the terminal node, and thus there are always
872
+ matching arcs with the same label for the last layer, i.e., z = ˙z. We prove the result by induction on
873
+ k. The base case for k = 1 is trivial, since D contains arcs with labels {0,1,...,|δ+(q)|} in the first
874
+ layer, which includes the label value of the first arc on ˙P1. For the induction hypothesis, assume
875
+ that the statement is true for k = d, i.e., for the sub-path ˙Pd with label values ˙wq
876
+ d = ( ˙wq
877
+ 1,..., ˙wq
878
+ d),
879
+ there is sub-path P d of D with matching arc labels. We show the statement holds for d + 1. Let
880
+ u ∈ ˙Ad+1 and v ∈ Ad+1 be the end nodes of ˙Pd and P d, respectively. It follows from Algorithm 2
881
+ that the index set representing the state value at node u contains ˙wq
882
+ d+1, i.e., ˙wq
883
+ d+1 ∈ ˙su = {0} ∪
884
+ {1,...,|δ+(q)|} \ { ˙w1, ˙w2,..., ˙wd}. The merging step in line 5 of Algorithm 3, on the other hand,
885
+ implies that sv ⊇ {0}∪{1,...,|δ+(q)|}\{w1,w2,...,wd} = {0}∪{1,...,|δ+(q)|}\{ ˙w1, ˙w2,..., ˙wd} =
886
+ ˙su, where the inclusion follows from the fact that state values at nodes on path P d contain those of
887
+ each individual path due to merging operation, and the first equality holds because of the induction
888
+ hypothesis. As a result, sv must contain ˙wq
889
+ d+1, which implies that there exists an arc with ˙wq
890
+ d+1
891
+ connected to node v on P d. Attaching this arc to P d, we obtain the desired sub-path P d+1.
892
+
893
+ 3.3.
894
+ DD-BD: Subproblem Formulation
895
+ At each iteration of the DD-BD algorithm, an optimal solution of the master problem is plugged into
896
+ the subproblems to obtain feasibility/optimality cuts. For the SGUFP formulation, this procedure
897
+ translates to obtaining an optimal solution of (4) in the space of w variables, which is used to
898
+ solve the subproblem (2a)-(2h). The formulation of the subproblem, however, is defined over the
899
+ original binary variables y, and the resulting feasibility/optimality cuts are generated in this space.
900
+ To remedy this discrepancy between the space of variables in the master and subproblems, we need
901
+ to find a one-to-one mapping between variables w and y, as outlined next.
902
+ Proposition 3. Consider a node q ∈ ¯V . Let yq be a feasible solution to (1b)-(1d). Then, wq
903
+ obtained as
904
w^q_{ind−(i,q)} = Σ_{j∈δ+(q)} ind+(q,j) y^q_{ij}          ∀i ∈ δ−(q),                        (5)
is a feasible solution to (3a)-(3b). Conversely, let w^q be a feasible solution to (3a)-(3b). Then, y^q
obtained as
y^q_{ij} = 1 − sign(|w^q_{ind−(i,q)} − ind+(q,j)|)        ∀(i,j) ∈ δ−(q) × δ+(q),            (6)
922
+ is a feasible solution to (1b)-(1d).
923
+
924
926
+ Proof.
927
+ For the direct statement, let yq be a feasible solution to (1b)-(1d), and construct a
928
+ vector wq according to (5). We show that wq satisfies all constraints (3a)-(3b). First, we show
929
+ that constraints (3a) are satisfied. Assume by contradiction that there exists j′ ∈ δ+(q) such that
930
+
931
+ i∈δ−(q) sign
932
+ ����wq
933
+ ind−(i,q) − ind+(q,j′)
934
+ ���
935
+
936
+ ≤ |δ−(q)| − 2. This implies that wq
937
+ ind−(i′,q) = wq
938
+ ind−(i′′,q) =
939
+ ind+(q,j′) for some i′,i′′ ∈ δ−(q). Then, we can write that
940
+ wq
941
+ ind−(i′,q) =
942
+
943
+ j∈δ+(q)
944
+ ind+(q,j)yq
945
+ i′j = ind+(q,j′) =
946
+
947
+ j∈δ+(q)
948
+ ind+(q,j)yq
949
+ i′′j = wq
950
+ ind−(i′′,q),
951
+ where the first and last equalities hold by (5). The second and third equalities in the above chain
952
+ of relations imply that yq
953
+ i′j′ = yq
954
+ i′′j′ = 1, since ind+(q,j′) > 0. This violates constraints (1c), reaching
955
+ a contradiction. Next, we show that constraints (3b) are satisfied. The proof follows directly from
956
+ construction of wq and constraints (1b).
957
+ For the converse statement, let wq be a feasible solution to (3a)-(3b), and construct a vec-
958
+ tor yq according to (6). We show that yq satisfies all constraints (1b)-(1d). To show that each
959
+ constraint (1b) is satisfied, consider i ∈ δ−(q). We can write that
960
+
961
+ j∈δ+(q)
962
+ yq
963
+ ij = |δ+(q)| −
964
+
965
+ j∈δ+(q)
966
+ sign
967
+ ����wq
968
+ ind−(i,q) − ind+(q,j)
969
+ ���
970
+
971
+ ≤ |δ+(q)| −
972
+
973
+ |δ+(q)| − 1
974
+
975
+ = 1,
976
+ where the first equality follows from the construction of yq, and the inequality holds by (3b) as
977
+ ���wq
978
+ ind−(i,q) − ind+(q,j)
979
+ ��� = 0 for at most one index j ∈ δ+(q). To show that each constraint (1c) is
980
+ satisfied, select j ∈ δ+(q). We have
981
+
982
+ i∈δ−(q)
983
+ yq
984
+ ij = |δ−(q)| −
985
+
986
+ i∈δ−(q)
987
+ sign
988
+ ����wq
989
+ ind−(i,q) − ind+(q,j)
990
+ ���
991
+
992
+ ≤ 1,
993
+ where the equality follows from the construction of yq, and the inequality holds because of con-
994
+ straint (3a). Finally, each constraint (1d) is satisfied due to the fact that 1 − sign(|.|) ∈ {0,1}.
995
+
996
+ Proposition 4. Mappings described by (5) and (6) are one-to-one over their respective
997
+ domains.
998
+ Proof.
999
+ Note that the mapping described by (5) is a linear transformation of the form wq = Byq
1000
+ with coefficient matrix B ∈ Z|δ−(q)|×(|δ−(q)||δ+(q)|). It is clear from the identity block structure of B,
1001
+ that it is full row-rank, since each column contains a single non-zero element while each row has
1002
+ at least one non-zero element. As a result, the null space of B is the origin, which implies that
1003
+ ˆwq = ˜wq only if ˆyq = ˜yq.
1004
+ For the mapping described by (6), let distinct points ˆwq and ˜wq satisfy (3b). Construct vectors
1005
+ ˆyq and ˜yq by (6) using ˆwq and ˜wq, respectively. Because ˆwq and ˜wq are distinct, there must
1006
+
1007
exist i ∈ δ−(q) such that ˆw^q_{ind−(i,q)} ≠ ˜w^q_{ind−(i,q)}. This implies that at least one of these variables, say
ˆw^q_{ind−(i,q)}, is non-zero. It follows from (3b) that ˆw^q_{ind−(i,q)} = ind+(q,j′) for some j′ ∈ δ+(q), and that
˜w^q_{ind−(i,q)} ≠ ind+(q,j′). According to (6), we write that ˆy_{ij′} = 1 − sign(|ˆw^q_{ind−(i,q)} − ind+(q,j′)|) = 1,
and that ˜y_{ij′} = 1 − sign(|˜w^q_{ind−(i,q)} − ind+(q,j′)|) = 0, showing that ˆy^q ≠ ˜y^q.
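The transformations (5) and (6) translate directly into code. The sketch below uses our own encoding of y as a dictionary over (incoming index, outgoing index) pairs, and checks that composing the two maps recovers the original matching.

def sign(x):
    return (x > 0) - (x < 0)

def y_to_w(y, n_in, n_out):                       # mapping (5)
    return {h: sum(k * y.get((h, k), 0) for k in range(1, n_out + 1))
            for h in range(1, n_in + 1)}

def w_to_y(w, n_out):                             # mapping (6)
    return {(h, k): 1 - sign(abs(w[h] - k))
            for h in w for k in range(1, n_out + 1)}

y = {(1, 2): 1, (3, 1): 1}                        # incoming 1 matched with outgoing 2, 3 with 1
w = y_to_w(y, n_in=3, n_out=2)                    # {1: 2, 2: 0, 3: 1}
assert w_to_y(w, n_out=2) == {(1, 1): 0, (1, 2): 1, (2, 1): 0, (2, 2): 0,
                              (3, 1): 1, (3, 2): 0}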
1028
+
1029
+ Using the results of Propositions 3 and 4, we can apply the DD-BD Algorithm 1 in its entirety for
1030
+ the SGUFP. In particular, at each iteration of the algorithm, we can transform the optimal solution
1031
+ ( ¯w, ¯z) obtained from the DD representing the master problem (4) into a solution (¯y, ¯z) through the
1032
+ mapping (6). Given an optimal first-stage solution ¯y, we can solve |Ξ| separate subproblems; one
1033
+ for each demand realization in the second-stage. The feasibility cuts obtained from subproblems,
1034
+ which are in the space of y variables, are translated back into the space of w variables through the
1035
+ mapping (5) and added to the master problem. Further, in a case where all subproblems produce
1036
+ an optimality cut, they can be aggregated to generate an optimality cut in the space of (y,z),
1037
+ which is added to the master problem after being translated into the space of (w,z) variables. The
1038
+ master DD will be refined with respect to the resulting inequalities, and an optimal solution is
1039
+ returned to be used for the next iteration.
1040
+ In the remainder of this section, we present details on the derivation of optimality/feasibility cuts
1041
+ from subproblem (2a)-(2h). Consider the following partitioning of the set of arcs A into subsets
1042
A1 := {(i,j) ∈ A | δ−(i) = ∅, δ+(j) ≠ ∅},      A2 := {(i,j) ∈ A | δ−(i) ≠ ∅, δ+(j) = ∅},
A3 := {(i,j) ∈ A | δ−(i) ≠ ∅, δ+(j) ≠ ∅},      A4 := {(i,j) ∈ A | δ−(i) = ∅, δ+(j) = ∅},
1064
and let θ^ξ = (α^ξ, β^ξ, γ^ξ, σ^ξ, φ^ξ, λ^ξ, µ^ξ) be the vector of dual variables associated with the constraints of
(2a)-(2h) for a scenario ξ ∈ Ξ. Further, define the bi-function
1066
f(y;θ^ξ) = Σ_{q∈V} Σ_{j∈δ+(q)} ( −ℓ_{qj} β^ξ_{qj} + u_{qj} γ^ξ_{qj} )
         + Σ_{q∈ ¯V} Σ_{(i,j)∈δ−(q)×δ+(q)} ( u_{iq}(1 − y^q_{ij}) λ^ξ_{iqj} + u_{qj}(1 − y^q_{ij}) µ^ξ_{iqj} )
         + Σ_{q∈ ¯V} Σ_{i∈δ−(q)} ( u_{iq} Σ_{j∈δ+(q)} y^q_{ij} σ^ξ_{iq} )
         + Σ_{q∈ ¯V} Σ_{j∈δ+(q)} ( u_{qj} Σ_{i∈δ−(q)} y^q_{ij} φ^ξ_{qj} ).
1115
+ For a given ¯y and each scenario ξ ∈ Ξ, the dual of the subproblem (2a)-(2h) can be written as
1116
+ follows where the symbol ⋆ on a node means that it belongs to ¯V .
1117
min   f(¯y;θ^ξ)                                                                                        (7a)
s.t.  α^ξ_{⋆q} − β^ξ_{i⋆q} + γ^ξ_{i⋆q} + Σ_{j∈δ+(⋆q)} λ^ξ_{i⋆qj} − Σ_{j∈δ+(⋆q)} µ^ξ_{i⋆qj} + σ^ξ_{i⋆q} ≥ r_{i⋆q}        ∀(i,⋆q) ∈ A1   (7b)
      α^ξ_q − β^ξ_{iq} + γ^ξ_{iq} ≥ r_{iq}                                                             ∀(i,q) ∈ A1    (7c)
      −α^ξ_{⋆q} − β^ξ_{⋆qj} + γ^ξ_{⋆qj} − Σ_{i∈δ−(⋆q)} λ^ξ_{i⋆qj} + Σ_{i∈δ−(⋆q)} µ^ξ_{i⋆qj} + φ^ξ_{⋆qj} ≥ r_{⋆qj}       ∀(⋆q,j) ∈ A2   (7d)
      −α^ξ_q − β^ξ_{qj} + γ^ξ_{qj} ≥ r_{qj}                                                            ∀(q,j) ∈ A2    (7e)
      −α^ξ_{⋆q} + α^ξ_{⋆j} − β^ξ_{⋆q⋆j} + γ^ξ_{⋆q⋆j} + Σ_{i∈δ−(⋆q)} ( µ^ξ_{i⋆q⋆j} − λ^ξ_{i⋆q⋆j} ) + Σ_{i∈δ+(⋆j)} ( λ^ξ_{⋆q⋆ji} − µ^ξ_{⋆q⋆ji} ) + σ^ξ_{⋆q⋆j} + φ^ξ_{⋆q⋆j} ≥ r_{⋆q⋆j}   ∀(⋆q,⋆j) ∈ A3   (7f)
      −α^ξ_{⋆q} + α^ξ_j − β^ξ_{⋆qj} + γ^ξ_{⋆qj} + Σ_{i∈δ−(⋆q)} ( µ^ξ_{i⋆qj} − λ^ξ_{i⋆qj} ) + φ^ξ_{⋆qj} ≥ r_{⋆qj}        ∀(⋆q,j) ∈ A3   (7g)
      −α^ξ_q + α^ξ_{⋆j} − β^ξ_{q⋆j} + γ^ξ_{q⋆j} + Σ_{i∈δ+(⋆j)} ( λ^ξ_{q⋆ji} − µ^ξ_{q⋆ji} ) + σ^ξ_{q⋆j} ≥ r_{q⋆j}        ∀(q,⋆j) ∈ A3   (7h)
      −α^ξ_q + α^ξ_j − β^ξ_{qj} + γ^ξ_{qj} ≥ r_{qj}                                                    ∀(q,j) ∈ A3    (7i)
      −β^ξ_{iq} + γ^ξ_{iq} ≥ r_{iq}                                                                    ∀(i,q) ∈ A4    (7j)
      α^ξ_q ∈ R                                                                                        ∀q ∈ V ′       (7k)
      β^ξ_{ij}, γ^ξ_{ij}, σ^ξ_{ij}, φ^ξ_{ij}, λ^ξ_{iqj}, µ^ξ_{iqj} ≥ 0                                  ∀i,q,j ∈ V.    (7l)
1283
+ If the above problem has an optimal solution ˆθξ for all ξ ∈ Ξ, the output of the subproblems will
1284
be an optimality cut of the form Σ_{ξ∈Ξ} Pr_ξ f(y; ˆθ^ξ) ≥ z. If the above problem is unbounded along a
1286
+ ray ˆθξ for a ξ ∈ Ξ, the output of the subproblem will be a feasibility cut of the form f(y; ˆθξ) ≥ 0.
1287
+ Note that replacing variables y in the above constraints with w through the mapping (5) results
1288
+ in separable nonlinear constraints. Nevertheless, since these constraints will be used to refine the
1289
+ master DD, their incorporation is simple due to structural flexibility of DDs in modeling such
1290
+ constraints; we refer the reader to Davarnia and Van Hoeve (2020) for a detailed account for
1291
+ modeling INLPs with DDs.
1292
+ 4.
1293
+ Computational Experiments
1294
+ In this section, we solve SGUFP as a core model for the unit train scheduling problem with demand
1295
+ stochasticity using three different approaches: (i) the standard MIP formulation that is a deter-
1296
+ ministic equivalent of the two-stage model and contains all variables and constraints of the master
1297
+ problem and |Ξ| subproblems; (ii) the Benders reformulation presented in Section 3.1 composed
1298
+ of the master problem (1a)-(1d) and |Ξ| subproblems (2a)-(2h); and (iii) the DD-BD algorithm
1299
+ proposed in the present paper. In the Benders approach, we solve separate subproblems using a
1300
+ fixed vector ¯y obtained from the master problem. The feasibility cuts generated by subproblems
1301
+ are added directly to the constraint set of the master problem, and the optimality cuts are added
1302
+ as an aggregated cut over all scenarios. We note here that when there is a feasibility cut for any
1303
+ scenario, we add it directly to separate the solution of the current iteration and move on to the
1304
+ next iteration. To obtain a valid inequality that provides a bound for the single z variable, we need
1305
+ to aggregate valid inequalities over all scenario subproblems as z is composed of the objective value
1306
+
1307
1309
+ of all these subproblems. Therefore, we can only produce an optimality cut for the z variable when
1310
+ we have optimality cuts for all of the subproblems. For the DD-BD approach, we use the following
1311
+ algorithmic choices to build restricted and relaxed DDs. For the restricted DDs, we choose a subset
1312
+ of the r-t paths with largest lengths, which are more likely to contain an optimal solution. For
1313
+ the relaxed DDs, we merge nodes that have the largest number of common members in their state
1314
+ values. We refer the reader to Bergman et al. (2016a) for other heuristic approaches that could be
1315
+ used for this purpose.
1316
+ 4.1.
1317
+ Test Instances
1318
+ In our experiments, we consider the structure of the SGUFP network given in Section 3.1. To
1319
+ ensure that the problem is always feasible, we create an artificial node s0 to compensate for any
1320
+ shortage of the supply, and add an arc from the artificial supply s0 to each demand node.
1321
+ We create test instances based on the specification given in Davarnia et al. (2019), which is
1322
+ inspired by realistic models. In particular, we consider a base rail network G′ = (V ′,A′) where 10%
1323
+ and 30% of the nodes are supply and demand nodes, respectively. We assume that 50% of the
1324
+ nodes must satisfy the NSNM requirement. We then create a network G = (V,A) by augmenting
1325
+ supply/demand and artificial nodes as described above with the following settings. The integer
1326
+ supply value at supply nodes is randomly selected from the interval [100,600]. The capacity of arcs
1327
connecting s0 to the demand nodes is considered to be unbounded, and the integer capacity value
1328
+ of other arcs is randomly selected from the interval [100,300]. For each demand scenario ξ ∈ Ξ,
1329
+ the integer demand value at demand nodes is randomly chosen from the interval [100,200]. The
1330
reward of the arcs connecting s0 to the demand nodes is generated from the interval [−10,−5]
1331
+ to represent the cost of lost demands. The reward of the arcs connecting the source to the supply
1332
+ nodes is randomly selected from the interval [5,10], and the reward of the arcs connecting the
1333
+ demand nodes to the sink is fixed to zero since the flow of these arcs is also fixed. The reward
1334
+ of all other arcs is created randomly from the interval [−2,2] where the negative values indicate
1335
+ the cost of sending flows through congested arcs. We consider four categories of rail networks with
1336
+ |V ′| ∈ {40,60,80,100}. For each category, we create five scenario classes for the number of demand
1337
+ scenarios |Ξ| ∈ {50,100,150,200,250}. For each network category and scenario class, we create five
1338
+ random instances based on the above settings. Test instances are publicly available (Salemi and
1339
+ Davarnia 2022b).
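A generator following the above specification might look as follows. This is a simplified sketch: node roles are assigned by position rather than drawn from a base rail network, rewards and capacities are only indicated in comments, and all names are our own.

import random

def generate_instance(n_nodes, n_scenarios, seed=0):
    rng = random.Random(seed)
    nodes = list(range(n_nodes))
    supply = nodes[: n_nodes // 10]                                       # 10% supply nodes
    demand = nodes[n_nodes // 10 : n_nodes // 10 + (3 * n_nodes) // 10]   # 30% demand nodes
    nsnm = set(rng.sample(nodes, n_nodes // 2))                           # 50% NSNM nodes
    supply_value = {v: rng.randint(100, 600) for v in supply}             # integer supplies
    # arc capacities would be drawn from [100, 300]; rewards from [5, 10], [-10, -5], [-2, 2]
    demand_scenarios = [{v: rng.randint(100, 200) for v in demand}
                        for _ in range(n_scenarios)]
    return supply, demand, nsnm, supply_value, demand_scenarios

sup, dem, nsnm, s_val, d_scen = generate_instance(n_nodes=40, n_scenarios=50)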
1340
+ 4.2.
1341
+ Numerical Results
1342
+ In this section, we present the numerical results that compare the performance of the DD-BD
1343
+ formulation for the SGUFP instances with that of the MIP formulation, denoted by “MIP”, and the
1344
+ standard Benders reformulation, denoted by “BD”. All experiments are conducted on a machine
1345
+
1346
1348
+ running Windows 10, x64 operating system with Intel® Core i7 processor (2.60 GHz) and 32 GB
1349
+ RAM. The Gurobi optimization solver (version 9.1.1) is used to solve instances for the MIP and
1350
+ BD models. When solving problems with Gurobi, we turn off presolve and cuts for all methods
1351
+ to have a fair comparison. Tables 1-4 report the running times of each of these formulations for
1352
+ |V ′| ∈ {40,60,80,100} and |Ξ| ∈ {50,100,150,200,250} where the time limit is set to 3600 seconds.
1353
+ The symbol “ > 3600” indicates that the problem was not solved within the time limit. As evident
1354
+ in these tables, the DD-BD formulation outperforms the other alternatives. In particular, the
1355
+ gap between the solution time of the DD-BD and the MIP and BD approaches widens as the
1356
+ problem size increases. For example, as reported in Table 1, while the DD-BD approach solves all
1357
+ 25 instances in under 275 seconds, the MIP approach fails to solve 10 of them within 3600 seconds,
1358
+ 80% of which involve 200 or 250 scenarios. This shows a clear superiority of the DD-BD over the
1359
+ MIP method. Further, for most of the instances, the DD-BD approach outperforms the standard
1360
+ BD approach, rendering it as the superior solution method among all three. Figures 5-8 compare
1361
+ the performance of DD-BD, BD, and MIP formulations through box and whisker plots for each
1362
+ network size and under each scenario class. In these figures, for uniformity of illustration, we used
1363
+ 3600 seconds for the running time of instances that fail to solve the problem within that time
1364
+ limit. As the figures show, the minimum, median, and maximum of running times of the DD-BD
1365
+ method are remarkably smaller than those of the both BD and MIP methods in all cases. These
1366
+ results show the potential of the DD-BD framework in solving network problems with challenging
1367
+ combinatorial structures. In Appendix B, we present additional numerical results for the DD-BD
1368
+ approach to assess its ability to solve larger problem sizes.
1369
Table 1   Running times (in seconds) of MIP, BD, and DD-BD for |V ′| = 40.
                             Number of scenarios
Instance #   Model       50        100       150       200       250
1            MIP       75.74    512.62   2877.19    > 3600    > 3600
             BD       141.83    313.84    339.81    451.93    565.82
             DD-BD     56.94    129.87    163.43    219.02    274.36
2            MIP       67.59    275.07    906.10   1892.21   2235.53
             BD        63.44    121.25    141.04    230.81    235.87
             DD-BD     42.60     82.65    128.16    164.52    208.94
3            MIP       94.86    753.23   2453.05    > 3600    > 3600
             BD        71.14    139.20    172.86    224.33    244.91
             DD-BD     53.32     93.58    113.93    178.65    217.33
4            MIP       71.46    309.62    > 3600    > 3600    > 3600
             BD        63.55    182.01    267.94    334.74    380.22
             DD-BD     46.61     87.81    130.19    183.23    253.72
5            MIP      380.33    406.73    > 3600    > 3600    > 3600
             BD       123.69    198.73    205.16    231.56    287.24
             DD-BD     67.04    104.78    138.46    195.69    231.74
1460
Table 2   Running times (in seconds) of MIP, BD, and DD-BD for |V ′| = 60.
                             Number of scenarios
Instance #   Model       50        100       150       200       250
1            MIP      893.73    > 3600    > 3600    > 3600    > 3600
             BD       241.85    556.18    582.80    758.54    933.05
             DD-BD    176.16    357.06    603.81    719.27    901.02
2            MIP      206.87    811.64   1554.10    > 3600    > 3600
             BD       259.63    351.39    624.08    816.44   1017.95
             DD-BD    189.07    388.85    572.52    764.76    961.35
3            MIP      139.70    702.96   1035.79    > 3600    > 3600
             BD       246.48    569.37    628.84    795.56    978.15
             DD-BD    142.81    284.65    422.52    565.23    725.86
4            MIP      153.16    415.46    938.03   1681.21   2604.25
             BD       238.33    388.19    563.15    732.59    919.08
             DD-BD    131.29    262.36    393.18    521.12    654.71
5            MIP      165.57    706.16   2447.15    > 3600    > 3600
             BD       194.12    244.61    479.32    463.63    617.09
             DD-BD    112.09    221.30    332.25    443.96    556.33
1552
Table 3   Running times (in seconds) of MIP, BD, and DD-BD for |V ′| = 80.
                             Number of scenarios
Instance #   Model       50        100       150       200       250
1            MIP      215.82    860.21    > 3600    > 3600    > 3600
             BD       588.51    806.61   1731.50   1860.12   2051.52
             DD-BD    256.12    500.52    757.68   1025.88   1278.13
2            MIP      479.76    > 3600    > 3600    > 3600    > 3600
             BD       398.29    713.01    861.65   1080.79   1709.04
             DD-BD    184.34    379.04    724.66   1088.21   1587.90
3            MIP      238.79    996.22    > 3600    > 3600    > 3600
             BD       702.18   1236.58   1650.42   1773.63   2227.89
             DD-BD    285.13    518.46    778.97   1046.39   1326.22
4            MIP      404.26   2441.64   2855.29    > 3600    > 3600
             BD       572.83   1219.37   1334.21   1745.91   2089.80
             DD-BD    263.78    665.30   1230.81   1277.93   1444.02
5            MIP      778.50    > 3600    > 3600    > 3600    > 3600
             BD       231.11    481.31    625.91   1310.24   1452.27
             DD-BD    187.34    376.96    564.34   1205.54   1412.94
1623
Table 4   Running times (in seconds) of MIP, BD, and DD-BD for |V ′| = 100.
                             Number of scenarios
Instance #   Model       50        100       150       200       250
1            MIP      774.18    > 3600    > 3600    > 3600    > 3600
             BD      1282.59   1728.71   1848.49   2307.74   3309.93
             DD-BD    698.36   1427.38   1731.95   2014.96   3323.54
2            MIP      480.97    > 3600    > 3600    > 3600    > 3600
             BD       781.47   1573.23   1820.79   2672.18   2819.61
             DD-BD    586.89   1171.96   1848.49   2471.49   2635.22
3            MIP     3071.37    > 3600    > 3600    > 3600    > 3600
             BD      1072.14   1322.96   2112.50   2951.55   3412.99
             DD-BD    485.31    703.70   1055.36   1803.66   2269.97
4            MIP      838.79   2585.38    > 3600    > 3600    > 3600
             BD      1548.93   1738.92   2580.53   2616.19   3169.28
             DD-BD    554.89    743.64   1098.82   2052.73   3094.23
5            MIP      714.39    > 3600    > 3600    > 3600    > 3600
             BD       808.48   1013.68   1722.01   2824.14   3282.10
             DD-BD    353.48    700.57   1680.60   2213.81   2907.78
1692
+ Figure 5
1693
+ Comparison of DD-BD, BD, and MIP models when |V ′| = 40 under five scenarios
1694
+ Figure 6
1695
+ Comparison of DD-BD, BD, and MIP models when |V ′| = 60 under five scenarios
1696
+ We conclude this section by noting that, while the focus of this paper has been on the unit train
1697
+ problem with the no-split no-merge requirements, the proposed DD-BD framework can be applied
1698
+ to model network problems that contain additional side constraints on the flow variables, as those
1699
+ constraints can be handled in the subproblems while the DD structure in the master problem
1700
+
1701
1736
+ Figure 7
1737
+ Comparison of DD-BD, BD, and MIP models when |V ′| = 80 under five scenarios
1738
+ Figure 8
1739
+ Comparison of DD-BD, BD, and MIP models when |V ′| = 100 under five scenarios
1740
+ remains intact. Examples of such side constraints include the usage-fee limitation (Holzhauser,
1741
+ Krumke, and Thielen 2017b) and the flow ratio requirement (Holzhauser, Krumke, and Thielen
1742
+ 2017a). Applying the DD-BD method to such network models and assessing its effectiveness com-
1743
+ pared to alternative approaches could be an interesting direction for future research.
1744
+
1745
1780
+ 5.
1781
+ Conclusion
1782
+ In this paper, we introduce a DD-based framework to solve the SGUFP. This framework uses
1783
+ Benders decomposition to decompose the SGUFP into a master problem composed of the combi-
1784
+ natorial NSNM constraints, and a subproblem that solves a continuous network flow model. The
1785
+ master problem is modeled by a DD, which is successively refined with respect to the cuts generated
1786
+ through subproblems. To assess the performance of the proposed method, we apply it to a variant
1787
+ of unit train scheduling problem formulated as a SGUFP, and compare it with the standard MIP
1788
+ and Benders reformulation of the problem.
1789
+ Acknowledgments
1790
+ This project is sponsored in part by the Iowa Energy Center, Iowa Economic Development Authority and
1791
+ its utility partners. We thank the anonymous referees and the Associate Editor for their helpful comments
1792
+ that contributed to improving the paper.
1793
+
1794
1796
+ References
1797
+ Abbink E, Van den Berg B, Kroon L, Salomon M, 2004 Allocation of railway rolling stock for passenger
1798
+ trains. Transportation Science 38(1):33–41.
1799
+ Alfieri A, Groot R, Kroon L, Schrijver A, 2006 Efficient circulation of railway rolling stock. Transportation
1800
+ Science 40(3):378–391.
1801
+ Andersen HR, Hadzic T, Hooker JN, Tiedemann P, 2007 A constraint store based on multivalued decision
1802
+ diagrams. International Conference on Principles and Practice of Constraint Programming, 118–132
1803
+ (Springer).
1804
+ Association of American Railroads, 2021 Freight railroads fact sheet. https://www.aar.org, Accessed:
1805
+ 06/28/2021.
1806
+ Baier G, K¨ohler E, Skutella M, 2005 The k-splittable flow problem. Algorithmica 42(3):231–248.
1807
+ Bergman D, Cire AA, 2018 Discrete nonlinear optimization by state-space decompositions. Management
1808
+ Science 64(10):4700–4720.
1809
+ Bergman D, Cire AA, Van Hoeve WJ, Hooker J, 2016a Decision diagrams for optimization, volume 1
1810
+ (Springer).
1811
+ Bergman D, Cire AA, Van Hoeve WJ, Hooker JN, 2016b Discrete optimization with decision diagrams.
1812
+ INFORMS Journal on Computing 28(1):47–66.
1813
+ Bornd¨orfer R, Reuther M, Schlechte T, Waas K, Weider S, 2016 Integrated optimization of rolling stock
1814
+ rotations for intercity railways. Transportation Science 50(3):863–877.
1815
+ Cacchiani V, Toth P, 2012 Nominal and robust train timetabling problems. European Journal of Operational
1816
+ Research 219(3):727–737.
1817
+ Carey M, Crawford I, 2007 Scheduling trains on a network of busy complex stations. Transportation Research
1818
+ Part B: Methodological 41(2):159–178.
1819
+ Ceselli A, Gatto M, L¨ubbecke ME, Nunkesser M, Schilling H, 2008 Optimizing the cargo express service of
1820
+ swiss federal railways. Transportation Science 42(4):450–465.
1821
+ Chakrabarti A, Chekuri C, Gupta A, Kumar A, 2007 Approximation algorithms for the unsplittable flow
1822
+ problem. Algorithmica 47(1):53–78.
1823
+ Cordeau JF, Toth P, Vigo D, 1998 A survey of optimization models for train routing and scheduling. Trans-
1824
+ portation Science 32(4):380–404.
1825
+ Cornelsen S, Di Stefano G, 2007 Track assignment. Journal of Discrete Algorithms 5(2):250–261.
1826
+ Davarnia D, 2021 Strong relaxations for continuous nonlinear programs based on decision diagrams. Opera-
1827
+ tions Research Letters 49(2):239–245.
1828
+ Davarnia D, Richard JPP, I¸cy¨uz-Ay E, Taslimi B, 2019 Network models with unsplittable node flows with
1829
+ application to unit train scheduling. Operations Research 67(4):1053–1068.
1830
1833
+ Davarnia D, Van Hoeve WJ, 2020 Outer approximation for integer nonlinear programs via decision diagrams.
1834
+ Mathematical Programming 1–40.
1835
+ Demir E, Burgholzer W, Hrušovský M, Arıkan E, Jammernegg W, Van Woensel T, 2016 A green inter-
1836
+ modal service network design problem with travel time uncertainty. Transportation Research Part B:
1837
+ Methodological 93:789–807.
1838
+ Fuchsberger M, Lüthi P, 2007 Solving the train scheduling problem in a main station area via a resource
1839
+ constrained space-time integer multi-commodity flow. Institute for Operations Research ETH Zurich .
1840
+ Furchtgott-Roth D, Hu PS, Nguyen L, Jahanmir S, Moore WH, Riley D, Beningo S, Chambers M, Smith-
1841
+ Pickel S, Thai H, et al., 2021 Pocket Guide to Transportation 2021 .
1842
+ Gong C, Shi J, Wang Y, Zhou H, Yang L, Chen D, Pan H, 2021 Train timetabling with dynamic and ran-
1843
+ dom passenger demand: A stochastic optimization method. Transportation Research Part C: Emerging
1844
+ Technologies 123:102963.
1845
+ Gonzalez JE, Cire AA, Lodi A, Rousseau LM, 2020 Integrated integer programming and decision diagram
1846
+ search tree with an application to the maximum independent set problem. Constraints 1–24.
1847
+ Haahr J, Lusby RM, 2017 Integrating rolling stock scheduling with train unit shunting. European Journal of
1848
+ Operational Research 259(2):452–468.
1849
+ Haahr JT, Wagenaar JC, Veelenturf LP, Kroon LG, 2016 A comparison of two exact methods for passenger
1850
+ railway rolling stock (re) scheduling. Transportation Research Part E: Logistics and Transportation
1851
+ Review 91:15–32.
1852
+ Hadžić T, Hooker J, 2006 Discrete global optimization with binary decision diagrams. Workshop on Global
1853
+ Optimization: Integrating Convexity, Optimization, Logic Programming, and Computational Algebraic
1854
+ Geometry (GICOLAG). Vienna.
1855
+ Harrod S, Gorman MF, 2010 Operations research for freight train routing and scheduling. Wiley Encyclopedia
1856
+ of Operations Research and Management Science .
1857
+ Heil J, Hoffmann K, Buscher U, 2020 Railway crew scheduling: Models, methods and applications. European
1858
+ Journal of Operational Research 283(2):405–425.
1859
+ Holzhauser M, Krumke SO, Thielen C, 2017a Maximum flows in generalized processing networks. Journal of
1860
+ Combinatorial Optimization 33(4):1226–1256.
1861
+ Holzhauser M, Krumke SO, Thielen C, 2017b A network simplex method for the budget-constrained minimum
1862
+ cost flow problem. European journal of operational research 259(3):864–872.
1863
+ Hosseininasab A, Van Hoeve WJ, 2021 Exact multiple sequence alignment by synchronized decision diagrams.
1864
+ INFORMS Journal on Computing 33(2):721–738.
1865
+ Hu Y, Lan J, Wan C, 2009 An algorithm for unsplittable flow problem in flexible reconfigurable network. 2009
1866
+ Fourth International Conference on Frontier of Computer Science and Technology, 543–547 (IEEE).
1867
1870
+ Huntley CL, Brown DE, Sappington DE, Markowicz BP, 1995 Freight routing and scheduling at CSX trans-
1871
+ portation. Interfaces 25(3):58–71.
1872
+ Içyüz IE, Richard JPP, Eskigun E, Acharya D, 2016 A two-model solution approach for the monthly coal
1873
+ train reservations planning problem. Transportation Science 50(3):926–946.
1874
+ Jin G, He S, Li J, Guo X, Li Y, 2019 An approach for train stop planning with variable train length and stop
1875
+ time of high-speed rail under stochastic demand. IEEE Access 7:129690–129708.
1876
+ Jordan WC, Turnquist MA, 1983 A stochastic, dynamic network model for railroad car distribution. Trans-
1877
+ portation Science 17(2):123–145.
1878
+ Jovanović D, Harker PT, 1991 Tactical scheduling of rail operations: the SCAN I system. Transportation Science
1879
+ 25(1):46–64.
1880
+ Kleinberg JM, 1996 Approximation algorithms for disjoint paths problems. Ph.D. thesis, Massachusetts Insti-
1881
+ tute of Technology.
1882
+ Kolman P, Scheideler C, 2006 Improved bounds for the unsplittable flow problem. Journal of Algorithms
1883
+ 61(1):20–44.
1884
+ Kwan RS, 2011 Case studies of successful train crew scheduling optimisation. Journal of Scheduling
1885
+ 14(5):423–434.
1886
+ Larsen R, Pranzo M, D’Ariano A, Corman F, Pacciarelli D, 2014 Susceptibility of optimal train schedules to
1887
+ stochastic disturbances of process times. Flexible Services and Manufacturing Journal 26(4):466–489.
1888
+ Lawley M, Parmeshwaran V, Richard JP, Turkcan A, Dalal M, Ramcharan D, 2008 A time–space scheduling
1889
+ model for optimizing recurring bulk railcar deliveries. Transportation Research Part B: Methodological
1890
+ 42(5):438–454.
1891
+ Layeb SB, Jaoua A, Jbira A, Makhlouf Y, 2018 A simulation-optimization approach for scheduling in stochas-
1892
+ tic freight transportation. Computers & Industrial Engineering 126:99–110.
1893
+ Lin Z, Kwan RS, 2014 A two-phase approach for real-world train unit scheduling. Public Transport 6(1-2):35–
1894
+ 65.
1895
+ Lin Z, Kwan RS, 2016 A branch-and-price approach for solving the train unit scheduling problem. Trans-
1896
+ portation Research Part B: Methodological 94:97–120.
1897
+ Lin Z, Kwan RS, 2018 Redundant coupling/decoupling in train unit scheduling optimization. Electronic Notes
1898
+ in Discrete Mathematics 64:45–54.
1899
+ Liu SQ, Kozan E, 2011 Optimising a coal rail network under capacity constraints. Flexible Services and
1900
+ Manufacturing Journal 23(2):90–110.
1901
+ Lusby RM, 2008 Optimization methods for routing trains through railway junctions. Ph.D. thesis,
1902
+ ResearchSpace@ Auckland.
1903
1906
+ Lusby RM, Larsen J, Ehrgott M, Ryan D, 2011 Railway track allocation: models and methods. OR spectrum
1907
+ 33(4):843–883.
1908
+ Meng L, Zhou X, 2011 Robust single-track train dispatching model under a dynamic and stochastic environ-
1909
+ ment: A scenario-based rolling horizon solution approach. Transportation Research Part B: Method-
1910
+ ological 45(7):1080–1102.
1911
+ Quaglietta E, Corman F, Goverde RM, 2013 Stability of railway dispatching solutions under a stochastic
1912
+ and dynamic environment. RailCopenhagen2013: 5th International Seminar on Railway Operations
1913
+ Modelling and Analysis (IAROR) (Institute for Transport Planning and Systems, ETH Zurich).
1914
+ Salemi H, Davarnia D, 2022a On the structure of decision diagram-representable mixed integer programs with
1915
+ application to unit commitment. Operations Research URL https://doi.org/10.1287/opre.2022.
1916
+ 2353.
1917
+ Salemi H, Davarnia D, 2022b Test instances for SGUFP. https://doi.org/10.5281/zenodo.6373664.
1918
+ Serra T, Hooker JN, 2019 Compact representation of near-optimal integer programming solutions. Mathe-
1919
+ matical Programming 1–34.
1920
+ Shen Y, Peng K, Chen K, Li J, 2013 Evolutionary crew scheduling with adaptive chromosomes. Transportation
1921
+ Research Part B: Methodological 56:174–185.
1922
+ Sherali HD, Suharko AB, 1998 A tactical decision support system for empty railcar management. Transporta-
1923
+ tion Science 32(4):306–329.
1924
+ Turner C, Tiwari A, Starr A, Blacktop K, 2016 A review of key planning and scheduling in the rail industry
1925
+ in Europe and UK. Proceedings of the Institution of Mechanical Engineers, Part F: Journal of Rail and
1926
+ Rapid Transit 230(3):984–998.
1927
+ Walkowiak K, 2006 New algorithms for the unsplittable flow problem. International Conference on Compu-
1928
+ tational Science and Its Applications, 1101–1110 (Springer).
1929
+ Ying Cs, Chow AH, Chin KS, 2020 An actor-critic deep reinforcement learning approach for metro train
1930
+ scheduling with rolling stock circulation under stochastic demand. Transportation Research Part B:
1931
+ Methodological 140:210–235.
1932
+ Zwaneveld PJ, Kroon LG, Van Hoesel SP, 2001 Routing trains through a railway station based on a node
1933
+ packing model. European Journal of Operational Research 128(1):14–33.
1934
1937
+ Appendix A:
1938
+ Comparison of Master Problem Formulations
1939
+ In this section, we describe the differences between DDs in the space of w variables and those in the space
1940
+ of original y in the master problem formulation (4) in Section 3.2. First, we illustrate the size difference
1941
+ between these DDs in Example 3.
1942
+ Example 3. Consider a directed graph G = (V,A) with node set V = {1,2,q,3,4} and arc set A =
1943
+ {(1,q),(2,q),(q,3),(q,4)} where the central node q is subject to NSNM constraints. Let ind−(1,q) =
1944
+ ind+(q,3) = 1 and ind−(2,q) = ind+(q,4) = 2. Then, the exact DDs shown in Figures 9(a) and 9(b) with
1945
+ three and five arc layers represent the feasible region of master problem (4) and (1a)-(1d), respectively, where
1946
+ −M and M are valid bounds for variable z.
1947
+ (a) A DD in the space of w variables.
1948
+ Numbers next to arcs represent labels.
1949
+ (b) A DD in the space of y variables.
1950
+ Numbers next to arcs represent labels.
1951
+ Figure 9
1952
+ Comparison of the number of arc layers for DDs in the space of w and y variables
1953
+ As evident from the above example, the main advantage of using a DD in the space of w is the reduction in
1954
+ the number of arc layers, which is the main determinant of the DD's computational efficiency. In particular,
1955
+ even though such a DD has a larger number of nodes at the layers, a relaxed DD can be constructed to limit
1956
+ the width, and hence provide an efficient relaxed DD in a smaller dimension, whereas the relaxations of the
1957
+ DD constructed in the space of y variables would still be higher-dimensional.
1958
+ To assess the computational efficiency of the solution approach in relation to the DD space, we compare
1959
+ the performance of the DD-BD method under two different settings: (i) where DDs are built in the space
1960
+ of w variables, denoted by DD-BD-w, and (ii) where DDs are built in the space of y variables, denoted by
1961
+ DD-BD-y. We report the results of these two implementations for |V ′| ∈ {40,80} and under five different
1962
+ scenarios in Table 5 and Table 6.
1963
+ As observed in these tables, the DD-BD-w solves all instances faster than DD-BD-y, with orders of
1964
+ magnitude time improvement as the problem size (number of scenarios) increases. These preliminary com-
1965
+ putational results show the advantage of designing the DD-BD method for the SGUFP in a transformed
1966
+ space of variables.
1967
+
1968
+ [Figure 9 artwork omitted: panel (a) is the three-arc-layer DD over the w variables and panel (b) is the
+ five-arc-layer DD over the y variables (y1,3, y1,4, y2,3, y2,4) and z, with the z-arcs labeled −M and M.]
2007
+ Table 5    Running times (in seconds) of DD-BD-w and DD-BD-y for |V′| = 40.
+ Instance #   Model      Number of scenarios
+                         50       100      150      200      250
+ 1            DD-BD-w    56.94    129.87   163.43   219.02   274.36
+              DD-BD-y    89.68    304.08   432.34   642.70   839.57
+ 2            DD-BD-w    42.60    82.65    128.16   164.52   208.94
+              DD-BD-y    68.23    148.76   244.53   344.86   605.04
+ 3            DD-BD-w    53.32    93.58    113.93   178.65   217.33
+              DD-BD-y    83.05    157.67   310.07   541.33   658.98
+ 4            DD-BD-w    46.61    87.81    130.19   183.23   253.72
+              DD-BD-y    78.11    149.26   325.31   460.73   694.57
+ 5            DD-BD-w    67.04    104.78   138.46   195.69   231.74
+              DD-BD-y    109.61   223.78   351.80   532.12   669.78
2044
+ Table 6    Running times (in seconds) of DD-BD-w and DD-BD-y for |V′| = 80.
+ Instance #   Model      Number of scenarios
+                         50       100       150       200       250
+ 1            DD-BD-w    256.12   500.52    757.68    1025.88   1278.13
+              DD-BD-y    483.42   977.03    1642.27   3175.72   4230.29
+ 2            DD-BD-w    184.34   379.04    724.66    1088.21   1587.90
+              DD-BD-y    340.13   864.21    1856.96   3010.55   4843.67
+ 3            DD-BD-w    285.13   518.46    778.97    1046.39   1326.22
+              DD-BD-y    568.32   1176.44   2401.98   3326.76   4283.58
+ 4            DD-BD-w    263.78   665.30    1230.81   1277.93   1444.02
+              DD-BD-y    501.04   1430.77   2868.92   3356.39   4356.48
+ 5            DD-BD-w    187.34   376.96    564.34    1205.54   1412.94
+              DD-BD-y    354.37   781.18    1279.73   3001.72   3834.08
2085
+ Appendix B:
2086
+ Additional Computational Experiments
2087
+ In this section, we present additional numerical results to assess the limits of the DD-BD method for larger
2088
+ problem instances. These results are given in Tables 7 and 8, where the columns are defined similarly to
2089
+ those of Tables 1-4. For these instances, the time limit is set to 3600 seconds, and the symbol “> 3600”
2090
+ indicates that the problem is not solved within this time limit.
2091
+ Table 7    Running times (in seconds) of DD-BD for |V′| = 120.
+ Instance #   Model    Number of scenarios
+                       50        100       150       200      250
+ 1            DD-BD    1494.49   2824.58   > 3600    > 3600   > 3600
+ 2            DD-BD    975.47    1892.41   3198.18   > 3600   > 3600
+ 3            DD-BD    1150.30   2263.09   3454.47   > 3600   > 3600
+ 4            DD-BD    1261.59   2403.79   > 3600    > 3600   > 3600
+ 5            DD-BD    906.34    1863.15   3050.68   > 3600   > 3600
2114
2117
+ Table 8    Running times (in seconds) of DD-BD for |V′| = 150.
+ Instance #   Model    Number of scenarios
+                       50        100      150      200      250
+ 1            DD-BD    2496.16   > 3600   > 3600   > 3600   > 3600
+ 2            DD-BD    2944.20   > 3600   > 3600   > 3600   > 3600
+ 3            DD-BD    2321.62   > 3600   > 3600   > 3600   > 3600
+ 4            DD-BD    2590.34   > 3600   > 3600   > 3600   > 3600
+ 5            DD-BD    2298.36   > 3600   > 3600   > 3600   > 3600
2136
+
hNAzT4oBgHgl3EQf4f5B/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
ktE5T4oBgHgl3EQfGw6j/content/tmp_files/2301.05434v1.pdf.txt ADDED
@@ -0,0 +1,1103 @@
1
+ LVRNet: Lightweight Image Restoration for
2
+ Aerial Images under Low Visibility
3
+ Esha Pahwa*
4
+ BITS Pilani
5
+ f20180675@pilani.bits-pilani.ac.in
6
+ Achleshwar Luthra*
7
+ Carnegie Mellon University
8
+ achleshl@andrew.cmu.edu
9
+ Pratik Narang
10
+ BITS Pilani
11
+ pratik.narang@pilani.bits-pilani.ac.in
12
+ Abstract
13
+ Learning to recover clear images from images having a
14
+ combination of degrading factors is a challenging task.
15
+ That being said, autonomous surveillance in low visibility
16
+ conditions caused by high pollution/smoke, poor air qual-
17
+ ity index, low light, atmospheric scattering, and haze dur-
18
+ ing a blizzard becomes even more important to prevent ac-
19
+ cidents. It is thus crucial to form a solution that can re-
20
+ sult in a high-quality image and is efficient enough to be
21
+ deployed for everyday use. However, the lack of proper
22
+ datasets available to tackle this task limits the performance
23
+ of the previous methods proposed. To this end, we generate
24
+ the LowVis-AFO dataset, containing 3647 paired dark-hazy
25
+ and clear images. We also introduce a lightweight deep
26
+ learning model called Low-Visibility Restoration Network
27
+ (LVRNet). It outperforms previous image restoration meth-
28
+ ods with low latency, achieving a PSNR value of 25.744 and
29
+ an SSIM of 0.905, making our approach scalable and ready
30
+ for practical use. The code and data can be found here.
31
+ 1. Introduction
32
+ Image enhancement and restoration have been a critical
33
+ area of research using both traditional digital image pro-
34
+ cessing techniques[12] [2], and the recent deep learning
35
+ frameworks[32][33][44]. The goal of image restoration is
36
+ to recover a clear image, whereas image enhancement is to
37
+ improve the quality of the degraded image. In this study, we
38
+ perform recovery of the clear image from the hazy version
39
+ while performing low-light image enhancement using a sin-
40
+ gle convolutional network, which could further be applied
41
+ to tasks such as search and rescue operations using object
42
+ detection.
43
+ *equal contribution
44
+ Using deep learning algorithms for image recovery has
45
+ many benefits, the most important being that it can general-
46
+ ize to different variations in the images captured. Hence, we
47
+ observe that deep learning-based methods on most bench-
48
+ mark datasets often outperform traditional methods signif-
49
+ icantly.
50
+ However, there are still challenges that the re-
51
+ searchers have to tackle for image restoration.
52
+ Publicly
53
+ available datasets containing a variety of degrading factors
54
+ that model real-world scenarios are few. Hence, most pre-
55
+ vious works have focused on removing one type of degra-
56
+ dation with a specific intensity level. From the perspective
57
+ of computational complexity, recent deep learning methods
58
+ are computationally expensive, and thus they can’t be de-
59
+ ployed on edge devices. Moreover, image restoration has
60
+ been a long-standing ill-posed research problem, as there
61
+ are infinite mappings between the degraded and the clear
62
+ image. Thus, the existing methods still have room for im-
63
+ provement in finding the correct mapping.
64
+ In this work, we focus on developing an end-to-end
65
+ lightweight deep-learning solution for the image restoration
66
+ task. Our major contributions are listed below:
67
+ • Taking inspiration from Non-linear Activation Free
68
+ Network (NAFNet) [5] and Level Attention Module
69
+ [45], we propose a novel algorithm - Low-Visibility
70
+ Restoration Network (LVRNet), that can effectively re-
71
+ cover high-quality images from degraded images taken
72
+ in poor visual conditions (Figure 1).
73
+ • Due to the lack of available datasets that exhibit a com-
74
+ bination of adverse effects, we generate a new dataset,
75
+ namely LowVis-AFO (abbreviation for Low-Visibility
76
+ Aerial Floating Objects dataset). We use AFO [15] as
77
+ our ground truth dataset and synthesize dark hazy im-
78
+ ages. The data generation process has been elaborated
79
+ in Section 4.1.
80
+ 1
81
+ arXiv:2301.05434v1 [cs.CV] 13 Jan 2023
82
+
83
+ [Figure 1 artwork: two scenes, each with panels labeled Input, Zero-DCE, SGZNet, DehazeNet, StarDCE,
+ BPPNet, FFANet, MSBDN-DFF, Our Result, and Ground Truth / Reference Image.]
103
+ Figure 1. Visual results on the proposed LowVis-AFO dataset. The method used to obtain each result has been mentioned under the
104
+ image.
105
+ • Benchmarking experiments have been provided on the
106
+ LowVis-AFO dataset to help future researchers for
107
+ quantitative comparison.
108
+ Along with that, LVRNet
109
+ surpasses the results obtained using previous image
110
+ restoration techniques by a significant margin.
111
+ • We perform extensive ablation studies to analyze the
112
+ importance of various loss functions existing in cur-
113
+ rent image restoration research. These experiments are
114
+ discussed in detail in Section 5.
115
+ 2. Related Works
116
+ This section highlights the previous work done in the fields
117
+ of image dehazing and low-light image enhancement and
118
+ their limitations.
119
+ 2.1. Image Dehazing
120
+ Hazy weather is often seen due to floating particles in
121
+ the environment which degrade the quality of the image
122
+ captured. Therefore, many previous works have tried to
123
+ recover a clear image from the hazy one.
124
+ These works
125
+ can be divided into two methods, ones that rely on prior
126
+ assumptions [17] and the atmospheric scattering model
127
+ (ASM) [31] and the others which use deep learning to solve
128
+ the problem, either by combination with ASM [3][34][36]
129
+ or independently [25][26][33][46][50].
130
+ Conventional
131
+ approaches are physically inspired and apply various types
132
+ of sharp image priors to regularize the solution space.
133
+ However, they exhibit shortcomings when implemented
134
+ with real-world images and videos. For example, the dark
135
+ channel prior method (DCP) [31] does not perform well
136
+ in regions containing the sky. These methods [1][11][24]
137
+ are known to be computationally expensive and require
138
+ 2
139
+
140
+ [Figure 2 artwork: Input Image → Pre-processing Conv → NAF-G1 / NAF-G2 / NAF-G3 → Stacked Feature
+ Maps → LAM → Post-processing Conv → Output Image, with a global residual connection.]
153
+ Figure 2. Model architecture of the proposed LVRNet. Starting from the top-left: The input image is passed to the pre-processing
154
+ convolution layers where feature maps are learned and passed to NAF Groups (here we have used 3 groups). The features extracted from
155
+ each group are concatenated (or stacked) along the channel dimension and sent as input to the Level Attention Module (LAM). Finally, we
156
+ pass LAM’s output to CNN layers for post-processing, adding the original image through residual connection and extracting the restored
157
+ image at the bottom-left.
158
+ heuristic parameter-tuning. Supervised dehazing methods
159
+ can be divided into two subparts, one is ASM based, and
160
+ the other is non-ASM based.
161
+ ASM-based Learning: MSCNN[34] solves the task of
162
+ image dehazing by dividing the problem into three steps:
163
+ using CNN to estimate the transmission map t(x), using
164
+ statistical methods to find atmospheric light A and then
165
+ recover the clear image J(x) using t(x) and A jointly. Meth-
166
+ ods like LAP-Net [23] adopt the relation of depth with the
167
+ amount of haze in the image. The farther the scene from the
168
+ camera, the denser the haze would be. Hence it considers
169
+ the difference in the haze density in the input image using a
170
+ stage-wise loss, where each stage predicts the transmission
171
+ map from mild to severe haze scenes.
172
+ DehazeNet [3]
173
+ consists of four sequential operations: feature extraction,
174
+ multi-scale mapping, calculating local extremum, and
175
+ non-linear regression. MSRL-DehazeNet [43] decomposes
176
+ the problem into recovering high-frequency and basic com-
177
+ ponents. GCANet [4] employs residual learning between
178
+ haze-free and hazy images as an optimization objective.
179
+ End-to-end Learning: This subpart of previous work cor-
180
+ responds to non-ASM-based deep learning methods for re-
181
+ covering the clear image. Back-Projected Pyramid Network
182
+ (BPPNet) [39] is a generative adversarial network that in-
183
+ cludes iterative blocks of UNets [37] to learn haze features
184
+ and pyramid convolution to preserve spatial features of dif-
185
+ ferent scales. The reason behind using iterative blocks of
186
+ UNets[37] is to avoid increasing the number of encoder lay-
187
+ ers in a single UNet[37] as it leads to a decrease in height
188
+ and width of latent feature representation hence resulting in
189
+ loss of spatial information. Moreover, different blocks of
190
+ UNet learn different complexities of haze features, and the
191
+ final concatenation step ensures that all of them are taken
192
+ into account during image reconstruction. The final recon-
193
+ struction is done using the pyramid convolution block. The
194
+ output feature is post-processed to get a haze-free image.
195
+ Feature-Fusion Attention Network (FFANet) [33] adopts
196
+ the idea of an attention mechanism and skip connections
197
+ to restore haze-free images. A combination of channel at-
198
+ tention and pixel attention is introduced, which helps the
199
+ network, deal with the uneven spatial distribution of haze
200
+ and different weighted information across channels. Au-
201
+ toencoders [6], hierarchical networks [9], and dense block
202
+ networks [14] has also been proposed for the task of image
203
+ dehazing. However, our main comparison lies with FFANet
204
+ [33], wherein we show a huge improvement compared to
205
+ the former method with a model containing a lesser number
206
+ of parameters and which can generalize to different levels
207
+ of haze.
208
+ 2.2. Low-light Enhancement
209
+ Traditional methods for low-light image enhancement
210
+ (LLIE) include Histogram Equalization-based methods and
211
+ Retinex model-based methods. Recent research has been
212
+ focused on developing deep learning-based methods fol-
213
+ lowing the success of the first seminal work. Deep learning-
214
+ based solutions are more accurate, robust, and have a
215
+ shorter inference time thus attracting more researchers.
216
+ 3
217
+
218
+ [Figure 3 artwork: the NAF Block applies Layer Norm, 1x1 conv, 3x3 depthwise conv, Simple Gate, SCA,
+ and 1x1 conv with a residual add, followed by Layer Norm, 1x1 conv, Simple Gate, 1x1 conv and a second
+ residual add; the NAF Group stacks several NAF Blocks followed by a conv layer and a skip connection
+ from input (I/P) to output (O/P).]
239
+ Figure 3. Architecture of NAF Block and NAF Group. NAF Blocks are the building blocks of NAF Groups. A detailed description has
240
+ been provided in Section 3.1 and Section 3.1.1
241
+ Learning strategies used in these methods are mainly su-
242
+ pervised learning [27, 29, 30, 35, 51, 28, 41], unsupervised
243
+ learning [20], and zero-shot learning [49, 13].
244
+ Supervised Learning: The first deep learning-based LLIE
245
+ method LLNet [27] is an end-to-end network that employs a
246
+ variant of stacked-sparse denoising autoencoder to brighten
247
+ and denoise low-light images simultaneously. LLNet in-
248
+ spired many other works [29, 30, 35, 51], but they do not
249
+ consider the observation that noise exhibits different lev-
250
+ els of contrast in different frequency layers. Later, Xu et
251
+ al. [41] proposed a network that suppresses noise in the
252
+ low-frequency layers and recovers the image contents by
253
+ inferring the details in high-frequency layers. There is an-
254
+ other division of methods that is based on the Retinex the-
255
+ ory. Deep Retinex-based models [40, 42] decomposes the
256
+ image into two separate components - light-independent
257
+ reflectance and structure-aware smooth illumination. The
258
+ final estimated reflection component is treated as the en-
259
+ hanced result.
260
+ Unsupervised Learning: Although the above-mentioned
261
+ methods perform well on synthetic data, they show limited
262
+ generalization capability on real-world low-light images.
263
+ This might be the result of overfitting. EnlightenGAN [20]
264
+ proposed to solve this issue by adopting an unsupervised
265
+ learning technique, i.e., avoiding the use of paired synthetic
266
+ data. This work uses attention-guided UNet as a generator
267
+ and global-local discriminators to achieve the objective of
268
+ LLIE.
269
+ Zero-shot Learning: These methods, in low-level vision
270
+ tasks, do not require any paired or unpaired training data.
271
+ Zero-reference Deep Curve Estimation [13] formulates im-
272
+ age enhancement as a task of image-specific deep curve
273
+ estimation, taking into account pixel value range, mono-
274
+ tonicity, and differentiability. It is a lightweight DCE-Net
275
+ that doesn’t require paired or unpaired ground truth images
276
+ during training and relies on non-reference loss functions
277
+ that measure the enhancement quality hence driving the
278
+ learning of the network. Another such method, Semantic-
279
+ guided Zero-shot low-light enhancement Network [49] is a
280
+ lightweight model for low-light enhancement factor extrac-
281
+ tion which is inspired by the architecture of U-Net [37]. The
282
+ output of this network is fed to a recurrent image enhance-
283
+ ment network, along with the degraded input image. Each
284
+ stage in this network considers the enhancement factor and
285
+ the output from the previous scale as its input. This is fol-
286
+ lowed by a feature-pyramid network that aims to preserve
287
+ the semantic information in the image.
288
+ More recently, researchers have experimented with trans-
289
+ formers for Zero-shot Learning LLIE. Structure-Aware
290
+ lightweight Transformer (STAR) [47] focuses on real-time
291
+ image enhancement without using deep-stacked CNNs or
292
+ large transformer models. STAR is formulated to capture
293
+ long-range dependencies between separate image patches,
294
+ facilitating the model to learn structural relationships be-
295
+ tween different regions of the images. In STAR, patches of
296
+ the image are tokenized into token embeddings. The tokens
297
+ generated as an intermediate stage are passed to a long-
298
+ short-range transformer that outputs two long and short-
299
+ range structural maps. These structural maps can further
300
+ predict curve estimation or transformation for image en-
301
+ hancement tasks. Although these methods show impressive
302
+ results for the study of low-light image enhancement for
303
+ which it originally developed, they cannot deal with foggy
304
+ low– light images.
305
+ 2.3. Limitations
306
+ Previous works have relied on ASM-based methods in the
307
+ case of dehazing and Retinex model-based methods for low-
308
+ light image enhancement.
309
+ However, these methods fail
310
+ to generalize to real-world images. Recent deep learning-
311
+ based methods using large networks solve the task of im-
312
+ age dehazing and low-light enhancement separately. To our
313
+ knowledge, no work is introduced that solves the two prob-
314
+ lems in a collaborative network. Deep learning methods
315
+ 4
316
+
317
+ also fail to generalize to different haze levels and darkness.
318
+ 3. Proposed Methodology
319
+ In this section, we provide a detailed description of the over-
320
+ all architecture proposed and the individual components in-
321
+ cluded in the network.
322
+ 3.1. Architecture
323
+ Like the group structure in [33], each group in our network
324
+ consists of a K NAF Block [5] with a skip connection at
325
+ the end as shown in Figure 3. The output of each group is
326
+ concatenated, passed to the level attention module to find
327
+ the weighted importance of the feature maps obtained, and
328
+ post-processed using two convolutional layers. A long skip
329
+ connection for global residual learning accompanies this.
330
+ 3.1.1
331
+ NAF-Block
332
+ To keep this work self-contained, we explain the NAF Block
333
+ [5] in this subsection. NAF Block is the building block
334
+ of Nonlinear Activation Free Network. Namely NAFNet
335
+ [5]. To avoid over-complexity in the architecture, this block
336
+ avoids using any activation functions like ReLU, GELU,
337
+ Softmax, etc. hence keeping a check on the intra-block com-
338
+ plexity of the network.
339
+ The input first passes through Layer Normalization as it can
340
+ help stabilize the training process. This is followed by con-
341
+ volution operations and a Simple Gate (SG). SG is a variant
342
+ of Gated Linear Units (GLU) [10] as evident from the fol-
343
+ lowing equations 1 and 2
344
+ GLU(X, f, g, σ) = f(X) ⊙ σ(g(X))
345
+ (1)
346
+ S impleGate(X, Y) = X ⊙ Y
347
+ (2)
348
+ and a replacement for GELU[18] activation function be-
349
+ cause of the similarity between GLU and GELU (Equa-
350
+ tion 3).
351
+ GELU(x) = xφ(x)
352
+ (3)
353
+ In Simple Gate, the feature maps are divided into two parts
354
+ along the channel dimension and then multiplied as shown
355
+ in Figure 4. Another novelty introduced in this block is
356
+ Simplified Channel Attention (SCA). Channel Attention
357
+ (CA) can be expressed as:
358
+ CA(X) = X ⊗ σ(W2max(0, W1pool(X)))
359
+ (4)
360
+ where X represents the feature map, pool indicates the
361
+ global average pooling operation,σ is Sigmoid, W1, W2 are
362
+ fully-connected layers and ⊗ is a channel-wise product op-
363
+ eration. This can be taken as a special case of GLU from
364
+ W
365
+ W
366
+ W
367
+ H
368
+ H
369
+ H
370
+ C/2
371
+ C/2
372
+ C/2
373
+ Figure 4. Simple Gate as represented by Equation 2 ⊗ denotes
374
+ channel-wise multiplicaWere
375
+ which we can derivate the equation for Simplified Channel
376
+ Attention:
377
+ SCA(X) = X ⊗ Wpool(X)
378
+ (5)
379
+ 3.1.2
380
+ Level Attention Module
381
+ Once we have extracted features from all the NAF Groups,
382
+ we concatenate them and pass them through the Level At-
383
+ tention Module (LAM) [45]. This module learns attention
384
+ weights for features obtained at different levels.
385
+ In LAM, each feature map is first reshaped to a 2D matrix
386
+ of the size K × HWC, where K, H, W, and C are the no. of
387
+ NAF Groups, height, width, and no. of channels of the fea-
388
+ ture maps respectively. We find a correlation matrix of this
389
+ 2D matrix by multiplying it with its transpose matrix. Fi-
390
+ nally, we multiply the 2D matrix with this correlation ma-
391
+ trix and reshape it to K × H × W × C tensor. Inspired by
392
+ residual learning, this tensor is substituted for residual and
393
+ is added to the original concatenated feature maps. The re-
394
+ sultant features are then reshaped to H × W × KC, passing
395
+ through 1 × 1 convolution operation to get the H × W × C
396
+ feature map. This is passed through some post-processing
397
+ convolutions to get the final enhanced output. We include
398
+ its architecture diagram in the supplementary material for a
399
+ better understanding.
400
+ 3.2. Loss Functions
401
+ Four loss functions, namely, reconstruction loss, perceptual
402
+ loss, edge loss [19], and FFT loss[7], have been used to
403
+ supervise the task of image restoration.
404
+ The total loss L is defined in Equation 6, where λ1 = 0.04,
405
+ λ2 = 1 and λ3 = 0.01.
406
+ L = Ls + λ1Lp + λ2Le + λ3Lf
407
+ (6)
408
+ 3.2.1
409
+ Reconstruction Loss:
410
+ The restored clear output image is compared with its ground
411
+ truth value in the spatial domain using a standard l1 loss as
412
+ 5
413
+
414
+ demonstrated in Equation 7. We use l1 loss instead of l2 loss
415
+ as it does not over-penalize the errors and leads to better
416
+ image restoration performance [48].
417
+ Ls = (1/N) Σ_{i=1}^{N} ∥ x_i^gt − NAFNet(x_i^dark,hazy) ∥_1        (7)
427
+ In the above equation, x_i^gt refers to the ground truth clear image, and NAFNet(x_i^dark,hazy) denotes
+ the output of our proposed network when a dark and hazy image is fed to the network.
434
+ 3.2.2
435
+ Perceptual Loss:
436
+ To reduce the perceptual loss and improve the image’s vi-
437
+ sual quality, we utilize the features of the pre-trained VGG-
438
+ 19 network [38] obtained from the output of one of the
439
+ ReLU activation layers. It is defined in Equation 8, where
440
+ wi j, hij, and cij refer to the dimensions of the respective
441
+ feature maps inside the VGG-19 architecture. φij denotes
442
+ the feature maps outputted from the jth convolutional layer
443
+ inside the i-th block in the VGG network.
444
+ Lp = (1 / (w_ij · h_ij · c_ij)) Σ_{x=1}^{w_ij} Σ_{y=1}^{h_ij} Σ_{z=1}^{c_ij} ∥ φ_ij(Igt)_xyz − φ_ij(Iout)_xyz ∥        (8)
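+ A hedged PyTorch sketch of this perceptual term is given below; the specific VGG-19 cut-off layer
+ (feature index 16, i.e. relu3_3) is an assumption, since the paper only states that features are taken
+ after one of the ReLU activations.
+ import torch
+ import torch.nn as nn
+ from torchvision.models import vgg19
+
+ class PerceptualLoss(nn.Module):
+     def __init__(self, layer_idx=16):                       # assumed cut-off layer
+         super().__init__()
+         self.features = vgg19(pretrained=True).features[:layer_idx + 1].eval()
+         for p in self.features.parameters():
+             p.requires_grad = False                         # VGG-19 stays frozen
+
+     def forward(self, out, gt):
+         # Mean absolute difference of feature maps, i.e. Equation 8 with the
+         # norm averaged over the w_ij x h_ij x c_ij feature dimensions.
+         return torch.mean(torch.abs(self.features(out) - self.features(gt)))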
458
+ 3.2.3
459
+ Edge Loss:
460
+ To recover the high-frequency details lost because of the in-
461
+ herent noise in dark and hazy images, we have an additional
462
+ edge loss to constrain the high-frequency components be-
463
+ tween the ground truth and the recovered image.
464
+ Le = √( (∇²(Igt) − ∇²(Iout))² + ϵ² )        (9)
468
+ In Equation 9, ∇2 refers to the Laplacian operation [22],
469
+ which is then applied to the ground truth and the predicted
470
+ clean image to get the edge loss.
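+ A minimal sketch of this edge term, assuming a fixed 3x3 Laplacian kernel applied per channel and a
+ Charbonnier-style epsilon, is:
+ import torch
+ import torch.nn.functional as F
+
+ LAPLACIAN = torch.tensor([[0., 1., 0.],
+                           [1., -4., 1.],
+                           [0., 1., 0.]]).view(1, 1, 3, 3)
+
+ def laplacian(x):
+     # Apply the Laplacian kernel to every channel independently.
+     k = LAPLACIAN.to(x.device, x.dtype).repeat(x.shape[1], 1, 1, 1)
+     return F.conv2d(x, k, padding=1, groups=x.shape[1])
+
+ def edge_loss(out, gt, eps=1e-3):
+     # Equation 9: Charbonnier-style penalty on the difference of Laplacians.
+     return torch.sqrt((laplacian(gt) - laplacian(out)) ** 2 + eps ** 2).mean()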
471
+ 3.2.4
472
+ FFT Loss:
473
+ To supervise the haze-free results in the frequency domain,
474
+ we add another loss called Fast Fourier transform (FFT) loss
475
+ (denoted by Lf) in Equation 12. It calculates the loss of both
476
+ amplitude and phase using the l1 loss function without ad-
477
+ ditional inference cost.
478
+ A_{x_i^gt}, P_{x_i^gt} = FFT(x_i^gt),        (10)
+ A_{x_i^out}, P_{x_i^out} = FFT(x_i^out),        (11)
+ Lf = (1/N) Σ_{i=1}^{N} ( ∥ A_{x_i^gt} − A_{x_i^out} ∥_1 + ∥ P_{x_i^gt} − P_{x_i^out} ∥_1 )        (12)
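+ A sketch of the FFT term and of the total objective in Equation 6 is given below; the function names
+ are ours, and the loss weights are those reported above.
+ import torch
+ import torch.nn.functional as F
+
+ def fft_loss(out, gt):
+     # Equations 10-12: l1 distance between the amplitudes and phases of the
+     # 2D Fourier transforms of the restored and ground-truth images.
+     f_out, f_gt = torch.fft.fft2(out), torch.fft.fft2(gt)
+     amp = F.l1_loss(torch.abs(f_out), torch.abs(f_gt))
+     phase = F.l1_loss(torch.angle(f_out), torch.angle(f_gt))
+     return amp + phase
+
+ def total_loss(ls, lp, le, lf, lam1=0.04, lam2=1.0, lam3=0.01):
+     # Equation 6: L = Ls + lam1*Lp + lam2*Le + lam3*Lf.
+     return ls + lam1 * lp + lam2 * le + lam3 * lf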
502
+ 4. Experimental Results
503
+ To demonstrate the outcomes of our model’s approach to-
504
+ wards image enhancement under low-visibility conditions,
505
+ this section contains a detailed description of the dataset
506
+ generated and used in Section 4.1, the experimental set-
507
+ tings in Section 4.2, the metrics used for evaluation in Sec-
508
+ tion 4.3 and a discussion on the results obtained in Sec-
509
+ tion 5.1 and 5.2.
510
+ 4.1. Dataset Details
511
+ Due to the lack of available datasets that meet our require-
512
+ ments, we generate a new one using the AFO dataset [15].
513
+ The dataset generation process has been elaborated below,
514
+ and the final images have been shown in Figure 5.
515
+ • Haze effect - To add fog, imgaug [21], a well-known
516
+ Python library, was used. A random integer value from {3, 4, 5} was selected, representing the fog's
+ severity. For each image, this random number was chosen and pre-defined functions within the
+ package were used to add a layer of fog to the image (a sketch is given after this list).
521
+ • Low-light Effect - Given a normal image, our goal is
522
+ to output a low-lit image while preserving the underly-
523
+ ing information. We follow the pipeline introduced [8],
524
+ which parametrically models the low light-degrading
525
+ transformation by observing the image signal process-
526
+ ing (ISP) pipeline between the sensor measurement
527
+ system and the final image.
528
+ The low-illumination-
529
+ degrading pipeline is a three-stage process:
530
+ – Unprocessing procedure - This part aims to syn-
531
+ thesize RAW format images from input sRGB
532
+ images by invert tone mapping, invert gamma
533
+ correction, and the transformation of the image
534
+ from sRGB space to cRGB space, and invert
535
+ white balancing.
536
+ – Low Light Corruption - This aims at adding shot
537
+ and read noises to the output of the unprocess-
538
+ ing procedure, as these are common in-camera
539
+ imaging systems. Shot noise is a type of noise
540
+ generated by the random arrival of photons in
541
+ a camera, which is a fundamental limitation.
542
+ Read noise occurs during the charge conversion
543
+ of electrons into voltage in the output amplifier,
544
+ which can be approximated using a Gaussian ran-
545
+ dom variable with zero mean and fixed variance.
546
+ – ISP Pipeline - RAW image processing is done
547
+ after the lowlight corruption process in the fol-
548
+ lowing order: add quantization noise, white bal-
549
+ ancing from cRGB to sRGB, and gamma correc-
550
+ tion, which finally outputs a degraded low-light
551
+ image.
552
+ 6
553
+
554
558
+ Figure 5. Visual illustration of a few sample images from our dataset. Columns 1 and 3 show original images taken from AFO Dataset
559
+ [15], whereas Columns 2 and 4 show their corresponding images generated as explained in Section 4.1 simulating low-visibility conditions.
560
+ • Combination of Haze and Low-light Effect - Re-
561
+ sults of applying the low-light generation algorithm described above to the foggy images generated
+ using imgaug are shown in Figure 5. Combining the two effects (fog and low light) makes it consid-
+ erably harder to locate the objects in the water bodies. Moreover, finding a solution for such a
+ combination of degradations has not been explored to date.
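+ A hedged sketch of the haze step is shown below; we assume imgaug's imgcorruptlike interface, which
+ exposes a severity parameter in {1, ..., 5}, although the exact fog routine the authors used is not stated.
+ The low-light degradation of [8] (unprocessing, noise injection, ISP) is not reproduced here.
+ import random
+ import imgaug.augmenters as iaa
+
+ def add_fog(image):
+     # image: HxWx3 uint8 array; severity drawn from {3, 4, 5} as in Section 4.1.
+     severity = random.choice([3, 4, 5])
+     return iaa.imgcorruptlike.Fog(severity=severity)(image=image)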
568
+ 4.2. Experimental Settings
569
+ The images were resized to get the resultant dimensions as
570
+ 256 × 456. Adam optimizer with an initial learning rate of
571
+ 1e−4, β1, and β2 with a value of 0.9 and 0.999 were chosen.
572
+ The batch size was fixed as 2. We have used 3 groups in all
573
+ our experiments, each with 16 blocks. Pytorch backend was
574
+ used to compile the model and train it.
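+ In code, this configuration corresponds roughly to the following; the model object is only a stand-in,
+ since the LVRNet constructor is not part of this description.
+ import torch
+ import torch.nn as nn
+
+ model = nn.Conv2d(3, 3, kernel_size=3, padding=1)   # stand-in for LVRNet (3 groups x 16 NAF blocks)
+ optimizer = torch.optim.Adam(model.parameters(), lr=1e-4, betas=(0.9, 0.999))
+ batch_size = 2
+ input_size = (256, 456)                              # H x W after resizing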
575
+ 4.3. Evaluation Metrics
576
+ We reported the results we obtained using two standard im-
577
+ age restoration metrics (i.e., PSNR and SSIM). These met-
578
+ rics will help us quantitatively evaluate the performance of
579
+ our model in terms of feature colors and structure similarity.
580
+ High PSNR and SSIM values if indicative of good results.
581
+ 5. Experimental Results
582
+ The architecture used is given in Figure 2. This section
583
+ gives a detailed analysis of the results obtained by the pro-
584
+ posed method.
585
+ 5.1. Discussion and Comparison
586
+ In this subsection, we discuss the evaluation results ob-
587
+ tained by the proposed pipeline. Previous methods were
588
+ Method            Year   PSNR     SSIM
+ Zero-DCE[13]      2020   12.323   0.529
+ SGZNet[49]        2022   12.578   0.519
+ BPPNet[39]        2022   15.507   0.755
+ DehazeNet[3]      2016   15.710   0.391
+ Star-DCE[47]      2021   16.651   0.539
+ FFANet[7]         2020   15.050   0.582
+ MSBDN-DFF[16]     2020   16.686   0.689
+ LVRNet (Ours)     2022   25.744   0.905
624
+ Table 1. Quantitative comparison of our proposed network
625
+ with previous work. The best results and the second-best results
626
+ have been highlighted with red color and blue colors, respectively.
627
+ trained on the newly generated dataset and tested to com-
628
+ pare their metrics with our model’s performance. These
629
+ methods were built to enhance the low-light image or obtain
630
+ a clear image from a hazy one. The results are mentioned
631
+ in Table 1.
632
+ We observe a huge increase in the PSNR value as compared
633
+ to Zero-DCE[13], which enhances the low-light image as a
634
+ curve estimation problem. However, it introduces an even
635
+ amplified noise leading to color degradation as seen in Fig-
636
+ ure 1. Notwithstanding its fast processing speed, Zero-DCE
637
+ has limited noise suppression and haze removal capacity.
638
+ Star-DCE[47], which uses a transformer backbone instead
639
+ of a CNN one in the Zero-DCE network, shows a 35.12%
640
+ increase in PSNR value. Owing to the added LAM struc-
641
+ 7
642
+
643
+ S.no.
644
+ Reconstruction Loss
645
+ Perceptual Loss
646
+ Edge Loss
647
+ FFT Loss
648
+ PSNR
649
+ SSIM
650
+ 1.
651
+ 
652
+ 
653
+ 
654
+ 
655
+ 24.070
656
+ 0.870
657
+ 2.
658
+ 
659
+ 
660
+ 
661
+ 
662
+ 25.455
663
+ 0.903
664
+ 3.
665
+ 
666
+ 
667
+ 
668
+ 
669
+ 25.624
670
+ 0.897
671
+ 4.
672
+ 
673
+ 
674
+ 
675
+ 
676
+ 25.719
677
+ 0.900
678
+ 5.
679
+ 
680
+ 
681
+ 
682
+ 
683
+ 25.744
684
+ 0.905
685
+ Table 2. Ablation experiments: We train our model using different combinations of loss functions to understand the importance of
686
+ individual losses for image restoration. The best results are obtained when the model is trained using all the loss functions mentioned in
687
+ this work.
688
+ ture, using which our model can focus on more important
689
+ feature maps, we can achieve a 54% higher PSNR value.
690
+ SGZNet[49] uses pretrained networks for enhancement fac-
691
+ tor estimation, thus their result is dependent on those pre-
692
+ trained weights, leading to a lower PSNR value of 12.578
693
+ on LowVis-AFO. From Figure 1, we observe that the result
694
+ obtained from SGZNet is still degraded by excessive noise
695
+ and lacks saturation. DehazeNet[3] is limited by the net-
696
+ work’s depth and cannot generalize to real-world scenarios.
697
+ Hence, it results in a low PSNR of 15.710. Methods like
698
+ BPPNet[39] and FFANet[33] are end-to-end deep learning
699
+ methods for image dehazing. BPPNet[39] distorts the color
700
+ distribution in the recovered image as it cannot remove the
701
+ dark regions, whereas FFA-Net[33] produces image with a
702
+ lower perceptual quality.
703
+ We propose an end-to-end deep learning pipeline (0.43M
704
+ parameters) that can perform image dehazing and low-light
705
+ image enhancement with a significant decrease in the num-
706
+ ber of parameters as compared to MSBDN-DFF [16] (31M
707
+ parameters) and FFA-Net[33] (4.45M parameters).
708
+ The supplementary material has provided a discussion
709
+ on the number of parameters of other models.
710
+ We also
711
+ trained the model for 10 epochs with fewer NAF blocks to
712
+ prove that we achieved better results than the lighter models,
713
+ not due to an increase in parameters but because of the self-
714
+ sufficiency of the added LAM module, non-linear activation
715
+ networks, and residual connections. The results of these ex-
716
+ periments are reported in the supplementary material.
717
+ 5.2. Ablation Studies
718
+ To prove the importance of the perceptual loss, edge loss,
719
+ and fft-loss, added to supervise the training procedure, we
720
+ conducted experiments excluding each of them and reported
721
+ the values of PSNR and SSIM in Table 2. We keep the l1
722
+ loss function constant in all experiments as it is critical in
723
+ image restoration tasks. We observe an increase in metric
724
+ values in the lower rows compared to row 1. As a result
725
+ of more supervision in the unchanged architecture, there is
726
+ an increase in the quality of clear images obtained, which
727
+ are demonstrated in the supplementary material. There is
728
+ also an increase in PSNR value (which depends on per-pixel
729
+ distance) in row 3, once we train the model without percep-
730
+ tual loss. This is seen as perceptual loss doesn’t compare
731
+ individual pixel values but the high-level features obtained
732
+ from a pretrained network. In row 4, we get a lower PSNR
733
+ value on excluding edge loss compared to row 5, as we get
734
+ lesser edge supervision. Overall, we get the best perfor-
735
+ mance when we include all the loss functions, as seen in
736
+ row 5.
737
+ 6. Conclusion
738
+ In this work, we have presented Low-Visibility Restora-
739
+ tion Network (LVRNet), a new lightweight deep learning
740
+ architecture for image restoration.
741
+ We also introduce a
742
+ new dataset, LowVis-AFO, that includes a diverse combi-
743
+ nation of synthetic darkness and haze. We also performed
744
+ benchmarking experiments on our generated dataset and
745
+ surpassed the results obtained using the previous image
746
+ restoration network by a significant margin. Qualitative and
747
+ quantitative comparison with previous work has demon-
748
+ strated the effectiveness of LVRNet. We believe our work
749
+ will motivate more research, focused on dealing with a com-
750
+ bination of adverse effects such as haze, rain, snowfall, etc.
751
+ rather than considering a single factor. In our future work,
752
+ we plan to extend LVRNet for image restoration tasks where
753
+ more factors, that negatively impact the image quality, are
754
+ taken into account.
755
+ 8
756
+
757
+ Supplementary Material
758
+ To make our submission self-contained and given the page
759
+ limitation, this supplementary material provides additional
760
+ details. Section 1 gives an overview of the number of pa-
761
+ rameters and PSNR obtained by different methods. Sec-
762
+ tion 2 contains visual results that highlight the significance
763
+ of the loss functions. Section 3 contains the ablation ex-
764
+ periment with lesser blocks, and Section 4 demonstrates the
765
+ architecture diagram of the level attention module.
766
+ 1. PSNR vs Parameters
767
+ Figure 6 presents the PSNR vs. Parameters plot that the
768
+ previous methods and our method achieved on the testing
769
+ set of LowVis-AFO. Our model outperforms the state-of-
770
+ the-art image dehazing and low-light image enhancement
771
+ methods by a good margin while having a lesser number of
772
+ parameters.
773
+ Figure 6. The PSNR vs Number of Parameters of recent image
774
+ restoration methods on the newly proposed LowVis-AFO dataset.
775
+ S.no.   #Blocks   PSNR      SSIM     #params   Runtime(s)
+ 1.      14        21.3432   0.8626   0.38M     0.035
+ 2.      12        20.4302   0.8488   0.33M     0.029
+ 3.      10        20.2965   0.8494   0.28M     0.024
799
+ Table 3. Results of the experiments conducted on a lesser num-
800
+ ber of NAF blocks. The training was done for 10 epochs and the
801
+ metrics were obtained on the test set thereafter.
802
+ 2. Ablation Experiment on Different Loss
803
+ Functions
804
+ Figure 8 demonstrates the visual results obtained when
805
+ we conducted experiments excluding some loss functions.
806
+ The motivation behind the experiment is to highlight the
807
+ importance of the extra loss functions (perceptual loss, edge
808
+ loss, fft-loss) added to supervise our pipeline. The quanti-
809
+ tative results are given in Table 2 in the main manuscript.
810
+ 3. Ablation Experiment with Lesser Number of
811
+ Blocks
812
+ To prove the self-sufficiency of the individual components
813
+ included in our architecture such as LAM, we conduct ex-
814
+ periments with a lesser number of NAF blocks [5] and re-
815
+ ported the PSNR and SSIM obtained in Table 1. Seeing
816
+ the results, we can conclude that our model achieves better
817
+ results, not because of an increase in the number of param-
818
+ eters as compared to the lighter model, but because of the
819
+ entire pipeline adopted.
820
+ 4. Level Attention Module
821
+ As mentioned in the main text, the diagram for LAM[45]
822
+ has been provided here in the supplementary material (refer to Figure 7).
824
+ References
825
+ [1] Codruta O Ancuti, Cosmin Ancuti, Chris Hermans, and
826
+ Philippe Bekaert. A fast semi-inverse approach to detect and
827
+ remove the haze from a single image. In Asian Conference
828
+ on Computer Vision, pages 501–514. Springer, 2010.
829
+ [2] Julian Besag, Jeremy York, and Annie Mollié. Bayesian im-
830
+ age restoration, with two applications in spatial statistics. An-
831
+ nals of the institute of statistical mathematics, 43(1):1–20,
832
+ 1991.
833
+ [3] Bolun Cai, Xiangmin Xu, Kui Jia, Chunmei Qing, and
834
+ Dacheng Tao. Dehazenet: An end-to-end system for single
835
+ image haze removal. IEEE Transactions on Image Process-
836
+ ing, 25(11):5187–5198, 2016.
837
+ [4] Dongdong Chen, Mingming He, Qingnan Fan, Jing Liao, Li-
838
+ heng Zhang, Dongdong Hou, Lu Yuan, and Gang Hua. Gated
839
+ context aggregation network for image dehazing and derain-
840
+ ing. In 2019 IEEE winter conference on applications of com-
841
+ puter vision (WACV), pages 1375–1383. IEEE, 2019.
842
+ [5] Liangyu Chen, Xiaojie Chu, Xiangyu Zhang, and Jian Sun.
843
+ Simple baselines for image restoration.
844
+ arXiv preprint
845
+ arXiv:2204.04676, 2022.
846
+ [6] Rongsen Chen and Edmund M-K Lai. Convolutional autoen-
847
+ coder for single image dehazing. In ICIP, pages 4464–4468,
848
+ 2019.
849
+ [7] Sung-Jin Cho, Seo-Won Ji, Jun-Pyo Hong, Seung-Won Jung,
850
+ and Sung-Jea Ko. Rethinking coarse-to-fine approach in sin-
851
+ gle image deblurring. In Proceedings of the IEEE/CVF inter-
852
+ national conference on computer vision, pages 4641–4650,
853
+ 2021.
854
+ [8] Ziteng Cui, Guo-Jun Qi, Lin Gu, Shaodi You, Zenghui
855
+ Zhang, and Tatsuya Harada. Multitask aet with orthogonal
856
+ tangent regularity for dark object detection. In Proceedings
857
+ 9
858
+
859
+ 26
860
+
861
+ Zero-DCE
862
+ SGZNet
863
+ 24
864
+ BPPNet
865
+ DehazeNet
866
+ 22
867
+ Star-DCE
868
+ FFA-Net
869
+ MSBDN-DFF
870
+ PSNR
871
+ 20
872
+ Ours
873
+ 18
874
+ 16
875
+ 14
876
+ 12
877
+ 0
878
+ 10M
879
+ 20M
880
+ 30M
881
+ NumberofParametersStacked
882
+ Feature Maps
883
+ K x H x W x C
884
+ K x HWC
885
+ Reshape
886
+ Transpose
887
+ Softmax
888
+ K x HWC
889
+ K x H x W x C
890
+ Feature Maps
891
+ Reshape
892
+ H x W x KC
893
+ Reshape
894
+ H x W x C
895
+ 1 x 1
896
+ Conv
897
+ Figure 7. Visual illustration of operations performed by Level Attention Module.
898
+ of the IEEE/CVF International Conference on Computer Vi-
899
+ sion, pages 2553–2562, 2021.
900
+ [9] Sourya Dipta Das and Saikat Dutta. Fast deep multi-patch
901
+ hierarchical network for nonhomogeneous image dehazing.
902
+ In Proceedings of the IEEE/CVF Conference on Computer
903
+ Vision and Pattern Recognition Workshops, pages 482–483,
904
+ 2020.
905
+ [10] Yann N Dauphin, Angela Fan, Michael Auli, and David
906
+ Grangier. Language modeling with gated convolutional net-
907
+ works.
908
+ In International conference on machine learning,
909
+ pages 933–941. PMLR, 2017.
910
+ [11] Raanan Fattal. Dehazing using color-lines. ACM transac-
911
+ tions on graphics (TOG), 34(1):1–14, 2014.
912
+ [12] Stuart Geman and Donald Geman.
913
+ Stochastic relaxation,
914
+ gibbs distributions, and the bayesian restoration of images.
915
+ IEEE Transactions on pattern analysis and machine intelli-
916
+ gence, (6):721–741, 1984.
917
+ [13] Chunle Guo, Chongyi Li, Jichang Guo, Chen Change Loy,
918
+ Junhui Hou, Sam Kwong, and Runmin Cong. Zero-reference
919
+ deep curve estimation for low-light image enhancement. In
920
+ Proceedings of the IEEE/CVF Conference on Computer Vi-
921
+ sion and Pattern Recognition, pages 1780–1789, 2020.
922
+ [14] Tiantong Guo, Venkateswararao Cherukuri, and Vishal
923
+ Monga. Dense123’color enhancement dehazing network. In
924
+ Proceedings of the IEEE/CVF Conference on Computer Vi-
925
+ sion and Pattern Recognition Workshops, pages 0–0, 2019.
926
+ [15] Jan Gąsienica-Józkowy, Mateusz Knapik, and Boguslaw Cy-
927
+ ganek. An ensemble deep learning method with optimized
928
+ weights for drone-based water rescue and surveillance. Inte-
929
+ grated Computer-Aided Engineering, pages 1–15, 01 2021.
930
+ [16] Dong Hang, Pan Jinshan, Hu Zhe, Lei Xiang, Zhang Xinyi,
931
+ Wang Fei, and Yang Ming-Hsuan. Multi-scale boosted de-
932
+ hazing network with dense feature fusion. In CVPR, 2020.
933
+ [17] Kaiming He, Jian Sun, and Xiaoou Tang. Single image haze
934
+ removal using dark channel prior. IEEE transactions on pat-
935
+ tern analysis and machine intelligence, 33(12):2341–2353,
936
+ 2010.
937
+ [18] Dan Hendrycks and Kevin Gimpel.
938
+ Gaussian error linear
939
+ units (gelus). arXiv preprint arXiv:1606.08415, 2016.
940
+ [19] Kui Jiang, Zhongyuan Wang, Peng Yi, Chen Chen, Baojin
941
+ Huang, Yimin Luo, Jiayi Ma, and Junjun Jiang. Multi-scale
942
+ progressive fusion network for single image deraining. In
943
+ Proceedings of the IEEE/CVF conference on computer vision
944
+ and pattern recognition, pages 8346–8355, 2020.
945
+ [20] Yifan Jiang, Xinyu Gong, Ding Liu, Yu Cheng, Chen Fang,
946
+ Xiaohui Shen, Jianchao Yang, Pan Zhou, and Zhangyang
947
+ Wang.
948
+ Enlightengan:
949
+ Deep light enhancement without
950
+ paired supervision. IEEE Transactions on Image Process-
951
+ ing, 30:2340–2349, 2021.
952
+ [21] Alexander B. Jung, Kentaro Wada, Jon Crall, Satoshi
953
+ Tanaka, Jake Graving, Christoph Reinders, Sarthak Ya-
954
+ dav, Joy Banerjee, Gábor Vecsei, Adam Kraft, Zheng Rui,
955
+ Jirka Borovec, Christian Vallentin, Semen Zhydenko, Kil-
956
+ ian Pfeiffer, Ben Cook, Ismael Fernández, François-Michel
957
+ De Rainville, Chi-Hung Weng, Abner Ayala-Acevedo,
958
+ Raphael Meudec, Matias Laporte, et al. imgaug. https:
959
+ //github.com/aleju/imgaug, 2020.
960
+ Online; accessed
961
+ 01-Feb-2020.
962
+ [22] Behzad Kamgar-Parsi and Azriel Rosenfeld.
963
+ Optimally
964
+ isotropic laplacian operator. IEEE Transactions on Image
965
+ Processing, 8(10):1467–1472, 1999.
966
+ [23] Yunan Li, Qiguang Miao, Wanli Ouyang, Zhenxin Ma, Hui-
967
+ juan Fang, Chao Dong, and Yining Quan. Lap-net: Level-
968
+ aware progressive network for image dehazing. In Proceed-
969
+ ings of the IEEE/CVF International Conference on Computer
970
+ Vision, pages 3276–3285, 2019.
971
+ [24] Zhuwen Li, Ping Tan, Robby T Tan, Danping Zou, Steven
972
+ Zhiying Zhou, and Loong-Fah Cheong. Simultaneous video
973
+ defogging and stereo reconstruction. In Proceedings of the
974
+ IEEE conference on computer vision and pattern recogni-
975
+ tion, pages 4988–4997, 2015.
976
+ [25] Xiao Liang, Runde Li, and Jinhui Tang. Selective attention
977
+ network for image dehazing and deraining. In Proceedings
978
+ of the ACM Multimedia Asia, pages 1–6. 2019.
979
+ [26] Xiaohong Liu, Yongrui Ma, Zhihao Shi, and Jun Chen. Grid-
980
+ dehazenet: Attention-based multi-scale network for image
981
+ dehazing.
982
+ In Proceedings of the IEEE/CVF international
983
+ conference on computer vision, pages 7314–7323, 2019.
984
+ [27] Kin Gwn Lore, Adedotun Akintayo, and Soumik Sarkar. Ll-
985
+ net: A deep autoencoder approach to natural low-light image
986
+ enhancement. Pattern Recognition, 61:650–662, 2017.
987
+ [28] Kun Lu and Lihong Zhang. Tbefn: A two-branch exposure-
988
+ fusion network for low-light image enhancement.
989
+ IEEE
990
+ Transactions on Multimedia, 23:4093–4105, 2020.
991
+ [29] Feifan Lv, Bo Liu, and Feng Lu. Fast enhancement for non-
992
+ uniform illumination images using light-weight cnns. In Pro-
993
+ ceedings of the 28th ACM International Conference on Mul-
994
+ timedia, pages 1450–1458, 2020.
995
+ 10
996
+
+ [30] Feifan Lv, Feng Lu, Jianhua Wu, and Chongsoon Lim. MBLLEN: Low-light image/video enhancement using CNNs. In BMVC, volume 220, page 4, 2018.
+ [31] Earl J McCartney. Optics of the atmosphere: scattering by molecules and particles. New York, 1976.
+ [32] Seungjun Nah, Tae Hyun Kim, and Kyoung Mu Lee. Deep multi-scale convolutional neural network for dynamic scene deblurring. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3883–3891, 2017.
+ [33] Xu Qin, Zhilin Wang, Yuanchao Bai, Xiaodong Xie, and Huizhu Jia. FFA-Net: Feature fusion attention network for single image dehazing. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 34, pages 11908–11915, 2020.
+ [34] Wenqi Ren, Si Liu, Hua Zhang, Jinshan Pan, Xiaochun Cao, and Ming-Hsuan Yang. Single image dehazing via multi-scale convolutional neural networks. In European Conference on Computer Vision, pages 154–169. Springer, 2016.
+ [35] Wenqi Ren, Sifei Liu, Lin Ma, Qianqian Xu, Xiangyu Xu, Xiaochun Cao, Junping Du, and Ming-Hsuan Yang. Low-light image enhancement via a deep hybrid network. IEEE Transactions on Image Processing, 28(9):4364–4375, 2019.
+ [36] Wenqi Ren, Jinshan Pan, Hua Zhang, Xiaochun Cao, and Ming-Hsuan Yang. Single image dehazing via multi-scale convolutional neural networks with holistic edges. International Journal of Computer Vision, 128(1):240–259, 2020.
+ [37] Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-Net: Convolutional networks for biomedical image segmentation, 2015.
+ [38] Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition, 2014.
+ [39] Ayush Singh, Ajay Bhave, and Dilip K. Prasad. Single image dehazing for a variety of haze scenarios using back projected pyramid network, 2020.
+ [40] Chen Wei, Wenjing Wang, Wenhan Yang, and Jiaying Liu. Deep Retinex decomposition for low-light enhancement. arXiv preprint arXiv:1808.04560, 2018.
+ [41] Ke Xu, Xin Yang, Baocai Yin, and Rynson WH Lau. Learning to restore low-light images via decomposition-and-enhancement. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2281–2290, 2020.
+ [42] Wenhan Yang, Wenjing Wang, Haofeng Huang, Shiqi Wang, and Jiaying Liu. Sparse gradient regularized deep Retinex network for robust low-light image enhancement. IEEE Transactions on Image Processing, 30:2072–2086, 2021.
+ [43] Chia-Hung Yeh, Chih-Hsiang Huang, and Li-Wei Kang. Multi-scale deep residual learning-based single image haze removal via image decomposition. IEEE Transactions on Image Processing, 29:3153–3167, 2019.
+ [44] Kai Zhang, Wangmeng Zuo, Yunjin Chen, Deyu Meng, and Lei Zhang. Beyond a Gaussian denoiser: Residual learning of deep CNN for image denoising. IEEE Transactions on Image Processing, 26(7):3142–3155, 2017.
+ [45] Kaihao Zhang, Wenhan Luo, Boheng Chen, Wenqi Ren, Bjorn Stenger, Wei Liu, Hongdong Li, and Ming-Hsuan Yang. Benchmarking deep deblurring algorithms: A large-scale multi-cause dataset and a new baseline model. arXiv preprint arXiv:2112.00234, 2021.
+ [46] Xiaoqin Zhang, Jinxin Wang, Tao Wang, and Runhua Jiang. Hierarchical feature fusion with mixed convolution attention for single image dehazing. IEEE Transactions on Circuits and Systems for Video Technology, 32(2):510–522, 2021.
+ [47] Zhaoyang Zhang, Yitong Jiang, Jun Jiang, Xiaogang Wang, Ping Luo, and Jinwei Gu. STAR: A structure-aware lightweight transformer for real-time image enhancement. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4106–4115, 2021.
+ [48] Hang Zhao, Orazio Gallo, Iuri Frosio, and Jan Kautz. Loss functions for image restoration with neural networks. IEEE Transactions on Computational Imaging, 3(1):47–57, 2016.
+ [49] Shen Zheng and Gaurav Gupta. Semantic-guided zero-shot learning for low-light image/video enhancement. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 581–590, 2022.
+ [50] Zhuoran Zheng, Wenqi Ren, Xiaochun Cao, Xiaobin Hu, Tao Wang, Fenglong Song, and Xiuyi Jia. Ultra-high-definition image dehazing via multi-guided bilateral learning. In 2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 16180–16189. IEEE, 2021.
+ [51] Minfeng Zhu, Pingbo Pan, Wei Chen, and Yi Yang. EEMEFN: Low-light image enhancement via edge-enhanced multi-exposure fusion network. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 34, pages 13106–13113, 2020.
+ [Figure 8 panels: Input, LP, LPF, LPE, LEF, LPEF, GT.]
+ Figure 8. Qualitative results obtained from experiments conducted on different loss functions. In the figure, L = L1 loss, P = Perceptual Loss, E = Edge loss and F = FFT loss.
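The abbreviations in Figure 8 correspond to combinations of an L1 term with perceptual, edge, and FFT terms. The following is a hedged sketch of how such a composite objective could be assembled; the weights, the Laplacian-kernel edge term, and the externally supplied `perceptual_fn` (e.g., an L1 distance between VGG features) are illustrative assumptions, not the paper's actual configuration.

```python
import torch
import torch.nn.functional as F


def fft_loss(pred, target):
    # L1 distance between the magnitudes of the 2-D Fourier spectra.
    return F.l1_loss(torch.fft.fft2(pred).abs(), torch.fft.fft2(target).abs())


def edge_loss(pred, target):
    # L1 distance between Laplacian responses, used here as a simple edge proxy (kernel is an assumption).
    kernel = torch.tensor([[0., 1., 0.], [1., -4., 1.], [0., 1., 0.]],
                          device=pred.device).view(1, 1, 3, 3).repeat(pred.shape[1], 1, 1, 1)
    lap = lambda x: F.conv2d(x, kernel, padding=1, groups=x.shape[1])
    return F.l1_loss(lap(pred), lap(target))


def total_loss(pred, target, perceptual_fn, w_p=0.1, w_e=0.05, w_f=0.05):
    # "LPEF" variant from Figure 8: L1 + Perceptual + Edge + FFT terms; weights are placeholders.
    return (F.l1_loss(pred, target)
            + w_p * perceptual_fn(pred, target)
            + w_e * edge_loss(pred, target)
            + w_f * fft_loss(pred, target))
```

Dropping individual terms from `total_loss` reproduces the other variants compared in the figure (LP, LPF, LPE, LEF).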