jackkuo committed
Commit 56e21e0 · verified · 1 Parent(s): ad360fd

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.

Files changed (50):
  1. -9AyT4oBgHgl3EQf3vkG/vector_store/index.faiss +3 -0
  2. .gitattributes +67 -0
  3. 09FAT4oBgHgl3EQfjh3M/vector_store/index.faiss +3 -0
  4. 09FAT4oBgHgl3EQfjh3M/vector_store/index.pkl +3 -0
  5. 0NFQT4oBgHgl3EQfzzas/content/tmp_files/2301.13414v1.pdf.txt +1833 -0
  6. 0NFQT4oBgHgl3EQfzzas/content/tmp_files/load_file.txt +0 -0
  7. 0dFKT4oBgHgl3EQfNy2J/content/2301.11756v1.pdf +3 -0
  8. 0dFKT4oBgHgl3EQfNy2J/vector_store/index.faiss +3 -0
  9. 0tE2T4oBgHgl3EQfiQfL/vector_store/index.faiss +3 -0
  10. 2dFST4oBgHgl3EQfXzgB/vector_store/index.faiss +3 -0
  11. 2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf +3 -0
  12. 2tE2T4oBgHgl3EQfjAfh/vector_store/index.pkl +3 -0
  13. 2tFLT4oBgHgl3EQfrC9d/content/2301.12142v1.pdf +3 -0
  14. 2tFLT4oBgHgl3EQfrC9d/vector_store/index.faiss +3 -0
  15. 2tFLT4oBgHgl3EQfrC9d/vector_store/index.pkl +3 -0
  16. 39FAT4oBgHgl3EQflh2J/content/tmp_files/2301.08618v1.pdf.txt +1102 -0
  17. 39FAT4oBgHgl3EQflh2J/content/tmp_files/load_file.txt +0 -0
  18. 3dFST4oBgHgl3EQfYzh6/content/tmp_files/2301.13789v1.pdf.txt +1063 -0
  19. 3dFST4oBgHgl3EQfYzh6/content/tmp_files/load_file.txt +0 -0
  20. 49A0T4oBgHgl3EQfNv86/vector_store/index.faiss +3 -0
  21. 5dA0T4oBgHgl3EQfNv_S/content/tmp_files/2301.02152v1.pdf.txt +2837 -0
  22. 5dA0T4oBgHgl3EQfNv_S/content/tmp_files/load_file.txt +0 -0
  23. 69E3T4oBgHgl3EQfpwru/content/2301.04646v1.pdf +3 -0
  24. 69E3T4oBgHgl3EQfpwru/vector_store/index.faiss +3 -0
  25. 69E3T4oBgHgl3EQfpwru/vector_store/index.pkl +3 -0
  26. 79FAT4oBgHgl3EQfoh2y/content/2301.08635v1.pdf +3 -0
  27. 79FAT4oBgHgl3EQfoh2y/vector_store/index.faiss +3 -0
  28. 79FAT4oBgHgl3EQfoh2y/vector_store/index.pkl +3 -0
  29. 7NE3T4oBgHgl3EQfRgki/content/2301.04421v1.pdf +3 -0
  30. 7NE3T4oBgHgl3EQfRgki/vector_store/index.faiss +3 -0
  31. 7NE3T4oBgHgl3EQfRgki/vector_store/index.pkl +3 -0
  32. 7NFJT4oBgHgl3EQfmSym/content/tmp_files/2301.11587v1.pdf.txt +1225 -0
  33. 7NFJT4oBgHgl3EQfmSym/content/tmp_files/load_file.txt +0 -0
  34. 99AzT4oBgHgl3EQfSvs8/content/2301.01236v1.pdf +3 -0
  35. 99AzT4oBgHgl3EQfSvs8/vector_store/index.faiss +3 -0
  36. 99AzT4oBgHgl3EQfSvs8/vector_store/index.pkl +3 -0
  37. 9NE1T4oBgHgl3EQfCQIp/content/2301.02861v1.pdf +3 -0
  38. 9NE1T4oBgHgl3EQfCQIp/vector_store/index.faiss +3 -0
  39. 9NE1T4oBgHgl3EQfCQIp/vector_store/index.pkl +3 -0
  40. B9E4T4oBgHgl3EQf5Q72/content/2301.05323v1.pdf +3 -0
  41. BNE1T4oBgHgl3EQfVgSF/content/2301.03103v1.pdf +3 -0
  42. BNE1T4oBgHgl3EQfVgSF/vector_store/index.faiss +3 -0
  43. CdE2T4oBgHgl3EQfSAcB/content/tmp_files/2301.03786v1.pdf.txt +1015 -0
  44. CdE2T4oBgHgl3EQfSAcB/content/tmp_files/load_file.txt +0 -0
  45. DNE0T4oBgHgl3EQfggEA/vector_store/index.faiss +3 -0
  46. FdA0T4oBgHgl3EQfBP_A/content/2301.01974v1.pdf +3 -0
  47. FdA0T4oBgHgl3EQfBP_A/vector_store/index.faiss +3 -0
  48. FdA0T4oBgHgl3EQfBP_A/vector_store/index.pkl +3 -0
  49. HNFJT4oBgHgl3EQfuC2G/vector_store/index.pkl +3 -0
  50. I9AyT4oBgHgl3EQfsPkA/vector_store/index.pkl +3 -0
-9AyT4oBgHgl3EQf3vkG/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d50533474cbd73688acfd8097e0c23f92969a60f8203aadb354ae571ff704a44
+ size 9371693
.gitattributes CHANGED
@@ -6142,3 +6142,70 @@ KtE4T4oBgHgl3EQfiA3u/content/2301.05131v1.pdf filter=lfs diff=lfs merge=lfs -tex
  B9AzT4oBgHgl3EQfh_2U/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
  _NE4T4oBgHgl3EQfEAug/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
  EdFRT4oBgHgl3EQfBDfd/content/2301.13464v1.pdf filter=lfs diff=lfs merge=lfs -text
+ 49A0T4oBgHgl3EQfNv86/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ 2dFST4oBgHgl3EQfXzgB/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ vdE3T4oBgHgl3EQfkwp-/content/2301.04600v1.pdf filter=lfs diff=lfs merge=lfs -text
+ BNE1T4oBgHgl3EQfVgSF/content/2301.03103v1.pdf filter=lfs diff=lfs merge=lfs -text
+ cNE5T4oBgHgl3EQfEg7V/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ NtAzT4oBgHgl3EQfWPy8/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ ztFQT4oBgHgl3EQfCjXs/content/2301.13231v1.pdf filter=lfs diff=lfs merge=lfs -text
+ u9AzT4oBgHgl3EQfP_sX/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ OtFPT4oBgHgl3EQfnDXw/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ s9FAT4oBgHgl3EQfgx0D/content/2301.08589v1.pdf filter=lfs diff=lfs merge=lfs -text
+ vdE3T4oBgHgl3EQfkwp-/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ YtA0T4oBgHgl3EQfFv9O/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ 0dFKT4oBgHgl3EQfNy2J/content/2301.11756v1.pdf filter=lfs diff=lfs merge=lfs -text
+ c9A0T4oBgHgl3EQfGv_2/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ 0dFKT4oBgHgl3EQfNy2J/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ KNAzT4oBgHgl3EQfIPul/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ B9E4T4oBgHgl3EQf5Q72/content/2301.05323v1.pdf filter=lfs diff=lfs merge=lfs -text
+ s9E1T4oBgHgl3EQfjgRh/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ f9AzT4oBgHgl3EQfMfsT/content/2301.01131v1.pdf filter=lfs diff=lfs merge=lfs -text
+ 9NE1T4oBgHgl3EQfCQIp/content/2301.02861v1.pdf filter=lfs diff=lfs merge=lfs -text
+ -9AyT4oBgHgl3EQf3vkG/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ KtE4T4oBgHgl3EQfJQzO/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ d9AzT4oBgHgl3EQf3f6y/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ xtAyT4oBgHgl3EQfOvaI/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ V9E4T4oBgHgl3EQfng2k/content/2301.05177v1.pdf filter=lfs diff=lfs merge=lfs -text
+ odE3T4oBgHgl3EQfjQqW/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ KNE4T4oBgHgl3EQf7g74/content/2301.05341v1.pdf filter=lfs diff=lfs merge=lfs -text
+ 79FAT4oBgHgl3EQfoh2y/content/2301.08635v1.pdf filter=lfs diff=lfs merge=lfs -text
+ t9E0T4oBgHgl3EQf9QLr/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ FdA0T4oBgHgl3EQfBP_A/content/2301.01974v1.pdf filter=lfs diff=lfs merge=lfs -text
+ s9FAT4oBgHgl3EQfgx0D/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ zdFST4oBgHgl3EQfUjgb/content/2301.13773v1.pdf filter=lfs diff=lfs merge=lfs -text
+ W9E2T4oBgHgl3EQfuggX/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ sNE2T4oBgHgl3EQffAfS/content/2301.03923v1.pdf filter=lfs diff=lfs merge=lfs -text
+ idFLT4oBgHgl3EQfay-h/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ BNE1T4oBgHgl3EQfVgSF/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ 9NE1T4oBgHgl3EQfCQIp/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ J9AzT4oBgHgl3EQfj_2L/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ OdFLT4oBgHgl3EQfPC8X/content/2301.12026v1.pdf filter=lfs diff=lfs merge=lfs -text
+ 7NE3T4oBgHgl3EQfRgki/content/2301.04421v1.pdf filter=lfs diff=lfs merge=lfs -text
+ KNE4T4oBgHgl3EQf7g74/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ 69E3T4oBgHgl3EQfpwru/content/2301.04646v1.pdf filter=lfs diff=lfs merge=lfs -text
+ J9AzT4oBgHgl3EQfj_2L/content/2301.01525v1.pdf filter=lfs diff=lfs merge=lfs -text
+ FdA0T4oBgHgl3EQfBP_A/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ 99AzT4oBgHgl3EQfSvs8/content/2301.01236v1.pdf filter=lfs diff=lfs merge=lfs -text
+ 2tFLT4oBgHgl3EQfrC9d/content/2301.12142v1.pdf filter=lfs diff=lfs merge=lfs -text
+ q9FPT4oBgHgl3EQf9TVT/content/2301.13211v1.pdf filter=lfs diff=lfs merge=lfs -text
+ zdFST4oBgHgl3EQfUjgb/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ 2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf filter=lfs diff=lfs merge=lfs -text
+ KNFRT4oBgHgl3EQfDTe1/content/2301.13472v1.pdf filter=lfs diff=lfs merge=lfs -text
+ 2tFLT4oBgHgl3EQfrC9d/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ V9E4T4oBgHgl3EQfng2k/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ 69E3T4oBgHgl3EQfpwru/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ 7NE3T4oBgHgl3EQfRgki/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ ztFQT4oBgHgl3EQfCjXs/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ YNFJT4oBgHgl3EQf6S0s/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ uNE0T4oBgHgl3EQf9gI3/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ 0tE2T4oBgHgl3EQfiQfL/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ 09FAT4oBgHgl3EQfjh3M/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ N9E4T4oBgHgl3EQfjw0A/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ KtE4T4oBgHgl3EQfiA3u/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ DNE0T4oBgHgl3EQfggEA/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ LdE4T4oBgHgl3EQfig1d/content/2301.05134v1.pdf filter=lfs diff=lfs merge=lfs -text
+ xtFJT4oBgHgl3EQfhCzP/content/2301.11564v1.pdf filter=lfs diff=lfs merge=lfs -text
+ 79FAT4oBgHgl3EQfoh2y/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ 99AzT4oBgHgl3EQfSvs8/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ SNFGT4oBgHgl3EQfgygC/content/2301.10699v1.pdf filter=lfs diff=lfs merge=lfs -text
09FAT4oBgHgl3EQfjh3M/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f95ab5fed91574f9bcb8f53ba18283bd5bbf815ba0024df8d16d7d98229511ed
+ size 3276845
09FAT4oBgHgl3EQfjh3M/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:883704595086a2f128515c49fdde2b2085eda1879308148b17c4c8ce48ad8779
+ size 121743
0NFQT4oBgHgl3EQfzzas/content/tmp_files/2301.13414v1.pdf.txt ADDED
@@ -0,0 +1,1833 @@
Incentive Compatibility in the Auto-bidding World

Yeganeh Alimohammadi†, Aranyak Mehta∗ and Andres Perlroth∗

February 1, 2023

Abstract

Auto-bidding has recently become a popular feature in ad auctions. This feature enables advertisers to simply provide high-level constraints and goals to an automated agent, which optimizes their auction bids on their behalf. These auto-bidding intermediaries interact in a decentralized manner in the underlying auctions, leading to new interesting practical and theoretical questions on auction design, for example, in understanding the bidding equilibrium properties between auto-bidder intermediaries for different auctions. In this paper, we examine the effect of different auctions on the incentives of advertisers to report their constraints to the auto-bidder intermediaries. More precisely, we study whether canonical auctions such as first price auction (FPA) and second price auction (SPA) are auto-bidding incentive compatible (AIC): whether an advertiser can gain by misreporting their constraints to the autobidder.

We consider value-maximizing advertisers in two important settings: when they have a budget constraint and when they have a target cost-per-acquisition constraint. The main result of our work is that for both settings, FPA and SPA are not AIC. This contrasts with FPA being AIC when auto-bidders are constrained to bid using a (sub-optimal) uniform bidding policy. We further extend our main result and show that any (possibly randomized) auction that is truthful (in the classic profit-maximizing sense), scale invariant and symmetric is not AIC. Finally, to complement our findings, we provide sufficient market conditions for FPA and SPA to become AIC for two advertisers. These conditions require advertisers' valuations to be well-aligned. This suggests that when the competition is intense for all queries, advertisers have less incentive to misreport their constraints.

From a methodological standpoint, we develop a novel continuous model of queries. This model provides tractability to study equilibrium with auto-bidders, which contrasts with the standard discrete query model, which is known to be hard. Through the analysis of this model, we uncover a surprising result: in auto-bidding with two advertisers, FPA and SPA are auction equivalent.

†Stanford University, yeganeh@stanford.edu
∗Google, {aranyak,perlroth}@google.com

arXiv:2301.13414v1 [econ.TH] 31 Jan 2023
1 Introduction

Auto-bidding has become a popular tool in modern online ad auctions, allowing advertisers to set up automated bidding strategies to optimize their goals subject to a set of constraints. By using algorithms to adjust the bid for each query, auto-bidding offers a more efficient and effective alternative to the traditional fine-grained bidding approach, which requires manual monitoring and adjustment of the bids.

There are three main components in the auto-bidding paradigm: 1) the advertisers, who provide high-level constraints to the auto-bidders; 2) the auto-bidder agents, who bid (in a decentralized manner) on behalf of each advertiser to maximize the advertiser's value subject to their constraints; and 3) the query-level auctions where queries are sold (see Figure 1).

Figure 1: The auto-bidding process: advertisers submit constraints and receive query allocations with specified costs as output. Inside the auto-bidding feature, each advertiser has an agent that optimizes the bidding profile within that advertiser's constraints.
Current research has made important progress in studying the interactions of the second and third components of the auto-bidding paradigm, particularly in understanding equilibrium properties (e.g., welfare and revenue) between the auto-bidder intermediaries for different auction rules (Aggarwal et al., 2019; Balseiro et al., 2021a; Deng et al., 2021a; Mehta, 2022; Liaw et al., 2022). There is also work on mechanism design for this setting in more generality, i.e., between the advertisers and the auctioneer directly, abstracting out the second component (Balseiro et al., 2021c, 2022; Golrezaei et al., 2021b).

Our work, instead, examines the relation between value-maximizing advertisers, who maximize the value they obtain subject to a payment constraint, and the other two components of the auto-bidding paradigm. More precisely, we study the impact of different auction rules on the incentives of advertisers to report their constraints to the auto-bidder intermediaries. We specifically ask whether canonical auctions such as the first price auction (FPA), the second price auction (SPA), and general truthful auctions are auto-bidding incentive compatible (AIC); in other words, can advertisers gain by misreporting their constraints to the auto-bidder?

We consider value-maximizing advertisers in two important settings: when they have a budget constraint and when they have a target cost-per-acquisition (tCPA) constraint.¹ The main result of our work is that for both settings, FPA and SPA are not AIC. This contrasts with FPA being AIC when auto-bidders are constrained to bid using a (sub-optimal) uniform bidding policy. We further generalize this surprising result and show that any (possibly randomized) truthful auction having a scale invariance and symmetry property is also not AIC. We complement our result by providing sufficient market conditions for FPA and SPA to become AIC for two advertisers. These conditions require advertisers' valuations to be well-aligned. This suggests that when the competition is intense for all queries, advertisers have less incentive to misreport their constraints.

¹The former is an upper bound on the total spend, and the latter is an upper bound on the average spend per acquisition (sale). Our results clearly hold for more general autobidding features, such as target return on ad-spend (tRoAS), where the constraint is an upper bound on the average spend per value generated.

In our model, each advertiser strategically reports a constraint (either a tCPA or a budget) to an auto-bidder agent, which bids optimally on their behalf in each of the queries. A key feature of our model is a two-stage game: first, advertisers submit constraints to the auto-bidders, and then, in the subgame, auto-bidders reach a bidding equilibrium across all query-auctions. Thus, when an advertiser deviates and reports a different constraint to its auto-bidder, the whole bidding subgame equilibrium can change.² In this context, an auction rule is called auto-bidding incentive compatible (AIC) if, for all equilibria, it is optimal for the advertiser to report their constraint to the auto-bidder.

²This two-stage model captures the idea that auto-bidding systems rapidly react to any change in the auction. Hence, if there is any change in the bidding landscape, auto-bidders quickly converge to a new equilibrium.
1.1 Main Results

We begin by presenting a stylized example in Section 2 that demonstrates how auto-bidding with SPA is not AIC (Theorem 2.1). Our example consists of a simple instance with three queries and two advertisers. This example highlights a scenario where an advertiser can benefit from lowering their reported budget or tCPA constraint.

We then introduce a continuous-query model that departs from the standard auto-bidding model by considering each query to be of infinitesimal size. This model provides tractability in solving equilibrium for general auction rules like FPA, which is key to studying the auto-bidding incentive compatibility properties of such auctions. Further, this continuous-query model succinctly captures real-world scenarios where the value of a single query is negligible compared to the pool of all queries that are sold.

Under the continuous-query model, we study the case where queries are sold using FPA and show that in the auto-bidding paradigm, FPA is not AIC (Section 4). We first characterize the optimal bidding strategy for each auto-bidder agent, which, surprisingly, has a tractable form.³ We then leverage this tractable form to pin down an equilibrium for the case of two auto-bidders when both face either a budget or a tCPA constraint. In this equilibrium, queries are divided between the two advertisers based on the ratio of their values for each advertiser. Specifically, advertiser 1 receives the queries for which the ratio of its value to the other advertiser's value is higher than a certain threshold. From this point, determining the equilibrium reduces to finding a threshold that makes the advertisers' constraints tight (see Lemma 4.4 for more detail). We then show that for instances where the threshold is not monotone in the auto-bidders' constraints, advertisers have an incentive to misreport the constraint to the auto-bidder (Theorem 4.1). Conversely, when the thresholds are monotone, advertisers report their constraints truthfully. We give conditions on the advertisers' valuations, for the two-advertiser setting, that guarantee this monotonicity (Theorem 4.10). This condition requires a strong positive correlation of the advertisers' valuations across the queries. As a practical insight, our results suggest that in settings where the competition on all queries is intense, advertisers' incentives to misreport are weak.

³Notice that in the discrete-query model, there is no simple characterization of the auto-bidder best response in FPA.

We then explore the case where, in FPA, auto-bidders are constrained to use a uniform bidding strategy: the bid on each query is a constant times the advertiser's value for the query.⁴ Uniform bidding is an optimal strategy only when auctions are truthful (Aggarwal et al., 2019). Even though for FPA these strategies are suboptimal, they have gained recent attention in the literature due to their tractability (Conitzer et al., 2022a,b; Chen et al., 2021; Gaitonde et al., 2022). We show that in this scenario, FPA with uniform bidding turns out to be AIC (Theorem 4.2). However, we note that while this proves AIC in our model, the suboptimality of uniform bidding for FPA can give rise to incentives to deviate in ways outside our model, e.g., by splitting an advertising campaign into multiple campaigns with different constraints. These considerations are important when implementing this rule in practice.

The second part of the paper pivots to the case where auctions are truthful, that is, auctions in which it is optimal for a profit-maximizing agent to bid their value. We first study the canonical SPA and show that, in our continuous-query model, SPA and FPA are auction equivalent; that is, the allocations and payments coincide among the set of reasonable equilibria (Theorem 5.5).⁵ As a corollary, the results we obtain for FPA apply to SPA as well: SPA is not AIC, and we derive sufficient conditions on advertisers' valuations so that SPA is AIC for two advertisers. We then consider a general class of randomized truthful auctions. We show that if the allocation rule satisfies the following natural conditions:⁶ (i) scale invariance (if all bids are multiplied by the same factor, then the allocation does not change), and (ii) symmetry (bidders are treated equally), then the auction rule is not AIC.

The main results of the paper are summarized in Table 1.
Per Query Auction                          | AIC
Second-Price Auction                       | Not AIC
Truthful Auctions                          | Not AIC
First-Price Auction                        | Not AIC
First-Price Auction with Uniform Bidding   | AIC⁷

Table 1: Main Results

⁴The uniform bidding strategy is also known in the literature as pacing bidding (Conitzer et al., 2022a; Chen et al., 2021; Conitzer et al., 2022b; Gaitonde et al., 2022).
⁵We show the auction equivalence among uniform bidding equilibria for SPA and threshold-type equilibria for FPA.
⁶These conditions have been widely studied in the literature due to their practical use (Mehta, 2022; Liaw et al., 2022; Allouah and Besbes, 2020).
⁷As previously discussed, implementing FPA with the suboptimal uniform bidding policy can create other distortions of advertisers' incentives (e.g., splitting their campaign into multiple campaigns with different constraints).
1.2 Related Work

The study of auto-bidding in ad auctions has gained significant attention in recent years. One of the first papers to study this topic is Aggarwal et al. (2019), which presents a mathematical formulation of the auto-bidder's problem given fixed constraints reported by the advertisers. They show that uniform bidding is an optimal strategy if and only if auctions are truthful (in the profit-maximizing sense). They further started an important line of work measuring, using a Price of Anarchy (PoA) approach, the welfare implications when auto-bidders bid in equilibrium for different auctions. Current results state that the PoA is 2 for SPA (Aggarwal et al., 2019) and also for FPA (Liaw et al., 2022)⁸; interestingly, it can be improved if the auction uses a randomized allocation rule (Mehta, 2022; Liaw et al., 2022). In a similar vein, Deng et al. (2021b) and Balseiro et al. (2021b) study models where the auction has access to extra information and show how reserves and boosts can be used to improve welfare and efficiency guarantees.

⁸The authors show that for a general class of deterministic auctions, PoA ≥ 2.

A second line of work studies how to design revenue-maximizing auctions when bidders are value-maximizing agents and may have private information about their values or their constraints (Golrezaei et al., 2021b; Balseiro et al., 2021c,b). In all these settings, the mechanism designer is not constrained by the presence of the auto-bidding intermediaries (Component 2 in Figure 1). Our study has added structure: advertisers submit their constraints first, followed by a decentralized subgame that reaches a bidding equilibrium before allocations and payments are determined. Thus, a priori, their mechanism setting can achieve broader outcomes than our auto-bidding constrained paradigm. Interestingly, for the one-query case the authors show that FPA with a uniform bidding policy is optimal (Balseiro et al., 2021c). Our results complement theirs and show that such a mechanism is implementable in the auto-bidding constrained paradigm and is AIC.

Closer to our auto-bidding paradigm, a recent line of work has started to study the incentives of advertisers when bidding via an auto-bidder intermediary. Mehta and Perlroth (2023) show that a profit-maximizing agent may benefit from reporting a target-based bidding strategy to the auto-bidder when the agent is concerned that the auctioneer may change (ex post) the auction rules. Also, in empirical work, Li and Tang (2022) develop a new methodology to numerically approximate auto-bidding equilibria and show numerical examples where advertisers may benefit by misreporting their constraints in SPA. Our work complements their findings by showing, within a theoretical framework, that SPA is not AIC.

Our work also connects with the literature on auctions with budget-constrained bidders. In particular, our results are closely related to Conitzer et al. (2022a), who study FPA with uniform bidding (a.k.a. pacing bidding). They introduce the concept of the first-price auction pacing equilibrium (FPPE) for budget-constrained advertisers, which is the same as the equilibrium in our auto-bidding subgame. They show that in FPPE the revenue and welfare are monotone increasing as a function of the advertisers' budgets. In our work, we show that in FPPE, advertisers' values are monotone as a function of their reported budgets. In addition, they differentiate between first and second price by showing that FPPE is computable, unlike SPPE, where maximizing revenue has previously been shown to be NP-hard (Conitzer et al., 2022b) and where the general problem of approximating SPPE is PPAD-complete (Chen et al., 2021). In contrast, we show that in the continuous model both SPA and FPA are tractable. Interestingly, this dichotomy between FPA and SPA (both with uniform bidding) is reflected in our work as well: the former is AIC, while the latter is not.

Uniform bidding has been explored in a separate body of research on repeated auctions, without the presence of auto-bidding. Balseiro and Gur (2019) investigate strategies to minimize regret in simultaneous first-price auctions with learning. Gaitonde et al. (2022) take this concept further by extending the approach to a wider range of auction settings. Furthermore, Golrezaei et al. (2021a) examine how to effectively price and bid for advertising campaigns when advertisers have both ROI and budget constraints.
2 Warm Up: Second Price Auction is not AIC!

To understand the implications of the auto-bidding model, we start with an example of auto-bidding with the second-price auction. Through this example, we will demonstrate the process of determining the equilibrium in an auto-bidding scenario and emphasize a case where the advertiser prefers to misreport their budget, leading to the following theorem.

Theorem 2.1. For the budget setting (when all advertisers are budget-constrained) and for the tCPA setting (when all advertisers are tCPA-constrained), we have that SPA is not AIC. That is, there are some instances where an advertiser benefits by misreporting its constraint.

Proof. Consider two budget-constrained advertisers and three queries Q = {q1, q2, q3}, where the expected value of winning query q for advertiser a is denoted by va(q) and is publicly known (as in Table 2). At first, each advertiser reports their budget to the auto-bidder: B1 = 2 and B2 = 4. Then the auto-bidder agents, one for each advertiser, submit the bidding profiles (to maximize their advertisers' value subject to the budget constraint). The next step is a second-price auction per query, where the queries are allocated to the highest bidder.

             | q1 | q2  | q3
Advertiser 1 | 4  | 3   | 2
Advertiser 2 | 1  | 1.3 | 10

Table 2: SPA with two budget-constrained advertisers is not AIC: the value of each query for each advertiser.

Finding the equilibrium bidding strategies for the auto-bidder agents is challenging, as the auto-bidder agents have to find the best-response bids with respect to the other auto-bidder agents, and each auto-bidder agent's bidding profile changes the cost of queries for the rest of the agents. To calculate such an equilibrium between auto-bidder agents, we use the result of Aggarwal et al. (2019) to find best-response strategies. Their result states that the best-response strategy in any truthful auto-bidding auction is uniform bidding.⁹ In other words, each agent optimizes over one variable, a bidding multiplier µa, and then bids on query q with respect to the scaled value µava(q).

⁹They show uniform bidding is almost optimal, but in Appendix A we show that in this example it is exactly optimal.

We show that with the given budgets B1 = 2 and B2 = 4, an equilibrium exists such that advertiser 1 only wins q1, and µ1 = 0.5 and µ2 = 1 result in such an equilibrium. To this end, we need to check: 1) Allocation: with bidding strategies b1 = (µ1v1(q1), µ1v1(q2), µ1v1(q3)) and b2 = (µ2v2(q1), µ2v2(q2), µ2v2(q3)), advertiser 1 wins q1 and advertiser 2 wins q2 and q3; 2) Budget constraints are satisfied; and 3) Bidding profiles are the best response: the auto-bidder agents do not have an incentive to increase their multiplier to get more queries. These three conditions are checked as follows:

1. Allocation inequalities: For each query, the advertiser with the highest bid wins it:

   v1(q1)/v2(q1) ≥ µ2/µ1 = 1/0.5 ≥ v1(q2)/v2(q2) ≥ v1(q3)/v2(q3).

2. Budget constraints: Since the auction is second-price, the cost of query q for advertiser 1 is µ2v2(q) and for advertiser 2 is µ1v1(q). So the following inequalities must hold for the budget constraints to be satisfied:

   2 = B1 ≥ µ2v2(q1) = 1                      (Advertiser 1),
   4 = B2 ≥ µ1(v1(q3) + v1(q2)) = 2.5         (Advertiser 2).

3. Best response: Does the advertiser's agent have an incentive to raise their multiplier to get more queries? If not, they should not be able to afford the next cheapest query:

   2 < µ2(v2(q1) + v2(q2)) = 2.3              (Advertiser 1),
   4 < µ1(v1(q3) + v1(q2) + v1(q1)) = 4.5     (Advertiser 2).

All three conditions are satisfied. Thus, this profile is an equilibrium for the auto-bidders' bidding game. In this equilibrium, advertiser 1 wins q1 and advertiser 2 wins q2 and q3.

Now, consider the scenario where advertiser 1 strategically reports their budget B1 to the auto-bidder. Suppose the first advertiser decreases their budget. Intuitively, the budget constraint for the auto-bidder agent should be harder to satisfy, and hence the advertiser should not win more queries. But, contrary to this intuition, when advertiser 1 reports a lower budget B′1 = 1, we show that, given the unique auto-bidding equilibrium, advertiser 1 wins q1 and q2 (more queries than in the case where advertiser 1 reports B1 = 2). Similarly to the above, we can check that µ′1 = 1 and µ′2 = 1/2.3 result in an equilibrium (we prove the uniqueness in Appendix A):

1. Allocation: advertiser 1 wins q1 and q2 since it has a higher bid on them:

   v1(q1)/v2(q1) ≥ v1(q2)/v2(q2) ≥ µ′2/µ′1 = 1/2.3 ≥ v1(q3)/v2(q3).

2. Budget constraints:

   4 ≥ µ′1 v1(q3) = 2,   and   1 = (1/2.3)(v2(q1) + v2(q2)).

3. Best response:

   4 < 1 · (v1(q3) + v1(q2)),   and   1 < (1/2.3)(v2(q1) + v2(q2) + v2(q3)).
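The verification above is mechanical, so it can be re-run in a few lines of code. The sketch below is our own illustration, not part of the paper (all function and variable names are ours): it recomputes the allocation and second-price payments for a candidate pair of uniform multipliers on the Table 2 instance.

```python
# A minimal sketch (ours, not from the paper): given uniform multipliers
# (mu1, mu2), allocate each query to the higher bid and charge the winner
# the losing bid, as in a per-query second-price auction.

v1 = {"q1": 4.0, "q2": 3.0, "q3": 2.0}   # advertiser 1's values (Table 2)
v2 = {"q1": 1.0, "q2": 1.3, "q3": 10.0}  # advertiser 2's values (Table 2)

def spa_outcome(mu1, mu2):
    spend = {1: 0.0, 2: 0.0}
    winners = {}
    for q in v1:
        b1, b2 = mu1 * v1[q], mu2 * v2[q]
        if b1 >= b2:                      # advertiser 1 wins, pays the other bid
            winners[q], spend[1] = 1, spend[1] + b2
        else:                             # advertiser 2 wins
            winners[q], spend[2] = 2, spend[2] + b1
    return winners, spend

# The misreporting equilibrium above: mu1' = 1, mu2' = 1/2.3.
winners, spend = spa_outcome(1.0, 1 / 2.3)
print(winners, spend)  # advertiser 1 wins q1 and q2, spending exactly B1' = 1
```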
This surprising example leads to the first main result of the paper. In Appendix A, we generalize the above example to the case of tCPA-constrained advertisers with the same set of queries as in Table 2.

Before studying other canonical auctions, in the next section we develop a tractable continuous-query model. Under this model, the characterization of the auto-bidders' bidding equilibria turns out to be tractable even when the auction is not SPA. This tractability is key for studying auto-bidding incentive compatibility.
3 Model

The baseline model consists of a set of A advertisers competing for q ∈ Q single-slot queries owned by an auctioneer. We consider a continuous-query model where Q = [0, 1]. Let xa(q) be the probability of winning query q for advertiser a. Then the expected value and payment of winning query q at price pa(q) are xa(q)va(q)dq and pa(q)dq.¹⁰,¹¹ Intuitively, this continuous-query model is a first-order approximation for instances where the size of each query relative to the whole set is small.

¹⁰All functions va, xa, pa are integrable with respect to the Lebesgue measure dq.
¹¹The set Q = [0, 1] is chosen to simplify the exposition. Our results apply to a general metric measurable space (Q, A, λ) with atomless measure λ.

The auctioneer sells each query q using a query-level auction which induces the allocation and payments (xa(q), pa(q)) for a ∈ A as a function of the bids (ba) for a ∈ A. In this paper, we focus on the First Price Auction (FPA), the Second Price Auction (SPA) and, more generally, any Truthful Auction (see Section 5.2 for details).

Auto-bidder agent:

Advertisers do not participate directly in the auctions; rather, they report high-level goal constraints to an auto-bidder agent who bids on their behalf in each of the queries. Thus, Advertiser a reports a budget constraint Ba or a target cost-per-acquisition (tCPA) constraint Ta to the auto-bidder. Then the auto-bidder, taking the other advertisers' bids as fixed, submits bids ba(q) to induce the xa(q), pa(q) that solve

   max ∫_0^1 xa(q)va(q) dq                                    (1)
   s.t. ∫_0^1 pa(q) dq ≤ Ba + Ta ∫_0^1 xa(q)va(q) dq.         (2)
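For intuition, problem (1)-(2) discretizes into a small linear program. The following sketch is ours, not the paper's algorithm; it assumes a fixed finite grid of queries and fixed competing prices, and solves the budget case with scipy:

```python
import numpy as np
from scipy.optimize import linprog

# Sketch (ours): a discretized auto-bidder problem, budget case:
#   max sum_i x_i v_i   s.t.   sum_i x_i p_i <= B,   0 <= x_i <= 1,
# where p_i is the (fixed) price of query i implied by competing bids.

def autobid_lp(v, p, B):
    n = len(v)
    res = linprog(c=-np.asarray(v, float),   # linprog minimizes, so negate value
                  A_ub=[list(p)], b_ub=[B],  # budget constraint
                  bounds=[(0.0, 1.0)] * n,
                  method="highs")
    return res.x                             # winning probabilities x_i

x = autobid_lp(v=[4.0, 3.0, 2.0], p=[1.0, 1.3, 10.0], B=2.0)
```

As expected from the knapsack structure of the problem, the solution buys queries in decreasing order of value-to-price ratio until the budget binds.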
The optimal bidding policy does not have a simple characterization for a general auction. However, when the auction is truthful (like SPA), the optimal bid takes a simple form in the continuous model (Aggarwal et al., 2019).

Remark 3.1 (Uniform Bidding). If the per-query auction is truthful, then uniform bidding is the optimal policy for the autobidder. Thus, ba(q) = µ · va(q) for some µ > 0. We formally prove this in Claim 5.4.

Advertisers

Following the current paradigm in autobidding, we consider that advertisers are value-maximizers and can be one of two types: a budget-advertiser or a tCPA-advertiser. Payoffs for these advertisers are as follows.
• For a budget-advertiser with budget Ba, the payoff is

   ua = ∫_0^1 xa(q)va(q) dq   if ∫_0^1 pa(q) dq ≤ Ba,
   ua = −∞                    if not.

• For a tCPA-advertiser with target Ta, the payoff is

   ua = ∫_0^1 xa(q)va(q) dq   if ∫_0^1 pa(q) dq ≤ Ta · ∫_0^1 xa(q)va(q) dq,
   ua = −∞                    if not.
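These payoff definitions translate directly into code. A minimal numerical sketch (ours; the trapezoidal grid approximation is an assumption, not part of the model):

```python
import numpy as np

# Sketch (ours): payoff of a constrained value-maximizer on Q = [0, 1],
# with the integrals approximated by the trapezoidal rule.

def payoff(x_a, v_a, p_a, budget=None, tcpa=None, n=100_000):
    q = np.linspace(0.0, 1.0, n)
    value = np.trapz(x_a(q) * v_a(q), q)   # integral of x_a(q) v_a(q) dq
    spend = np.trapz(p_a(q), q)            # integral of p_a(q) dq
    cap = budget if budget is not None else tcpa * value
    return value if spend <= cap else -np.inf
```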
Game, Equilibrium and Auto-bidding Incentive Compatibility (AIC)

The timing of the game is as follows. First, each advertiser, depending on their type, submits a budget or target constraint to an auto-bidder agent. Then, each auto-bidder solves Problem (1) for the respective advertiser. Finally, the per-query auctions run, and allocations and payments accrue.

We consider a complete-information setting and use subgame perfect equilibrium (SPE) as the solution concept. Let Va(B′a; Ba) be the expected payoff in the subgame for a budget-advertiser with budget Ba that reports B′a to the auto-bidder (likewise, we define Va(T′a; Ta) for the tCPA-advertiser).

Definition 3.2 (Auto-bidding Incentive Compatibility (AIC)). An auction rule is Auto-bidding Incentive Compatible (AIC) if for every SPE we have that Va(Ba; Ba) ≥ Va(B′a; Ba) and Va(Ta; Ta) ≥ Va(T′a; Ta) for every Ba, B′a, Ta, T′a.

Similar to the classic notion of incentive compatibility, an auction rule satisfying AIC makes the advertisers' decision simpler: they simply need to report their target to the auto-bidder. However, notice that the auto-bidders play a subgame after the advertisers' reports. Thus, when Advertiser a deviates and submits a different constraint, the subgame outcome may starkly change: not only the bids of Advertiser a but also the other advertisers' bids may change.
4 First Price Auctions

In this section, we demonstrate that the first price auction is not auto-bidding incentive compatible.

Theorem 4.1. Suppose that there are at least two budget-advertisers or two tCPA-advertisers. Then FPA is not AIC.

Later, in Section 4.2, we show a complementary result by providing sufficient conditions on advertisers' value functions to make FPA AIC for the case of two advertisers. We show that this sufficient condition holds in many natural settings, suggesting that in practice FPA tends to be AIC.

Then, in Section 4.3, we turn our attention to FPA where autobidders are restricted to uniform bidding across the queries. In this case, we extend the result of Conitzer et al. (2022a) to our continuous-query model and show the following result.

Theorem 4.2. FPA restricted to uniform bidding is AIC.

4.1 Proof of Theorem 4.1

We divide the proof of Theorem 4.1 into three main steps. Step 1 characterizes the best-response bidding profile for an autobidder in the subgame. As part of our analysis, we derive a close connection between first and second price auctions in the continuous-query model that reduces the task of finding the optimal bid for each query to finding a single multiplying variable for each advertiser.

In Step 2, we leverage the tractability of our continuous-query model and pin down the subgame bidding equilibrium when there are either two budget-advertisers or two tCPA-advertisers in the game (Lemma 4.4). We derive an equation that characterizes the ratio of the multipliers of each advertiser as a function of the constraints submitted by the advertisers. This ratio defines the set of queries that each advertiser wins, and as we will see, the value accrued by each advertiser is monotone in this ratio. So, to find a non-AIC example, one has to find scenarios where the equilibrium ratio is not a monotone function of the input constraints, which leads to the next step.

To conclude, we show in Step 3 an instance where the implicit solution for the ratio is non-monotonic, demonstrating that auto-bidding in first-price auctions is not AIC. As part of our proof, we interestingly show that AIC is harder to satisfy when advertisers face budget constraints rather than tCPA constraints (see Corollary 4.6).
Step 1: Optimal Best Response

The following claim shows that, contrary to the discrete-query model, the best response for the autobidder in a first price auction can be characterized as a function of a single multiplier.

Claim 4.3. Taking the other auto-bidders as fixed, there exists a multiplier µa ≥ 0 such that the following bidding strategy is optimal:

   ba(q) = max_{a′≠a} b_{a′}(q)   if µa·va(q) ≥ max_{a′≠a} b_{a′}(q),
   ba(q) = 0                      otherwise.

The result holds whether the advertiser is budget-constrained or tCPA-constrained.¹²

¹²In FPA, ties are broken in a way that is consistent with the equilibrium. This is similar to the pacing equilibrium notion, where the tie-breaking rule is endogenous to the equilibrium (Conitzer et al., 2022a).

Proof. We show that in a first-price auction, the optimal bidding strategy is to bid on queries with a value-to-price ratio above a certain threshold. To prove this, we assume that the bidding profile of all advertisers is given. Since the auction is first-price, advertiser a can win each query q by fixing a small enough ϵ > 0 and paying max_{a′≠a}(b_{a′}(q)) + ϵ. So, let pa(q) = max_{a′≠a}(b_{a′}(q)) be the price of query q. Since we have assumed that the value functions of all advertisers are integrable (i.e., there are no measure-zero sets of queries with a high value), in the optimal strategy pa is also integrable, since it is suboptimal for any advertiser to bid a positive amount (and hence incur a positive cost) on a measure-zero set of queries.

First, consider a budget-constrained advertiser. The main idea is that since the prices are integrable, the advertiser's problem is similar to a continuous knapsack problem. In a continuous knapsack problem, it is well known that the optimal strategy is to choose queries with the highest value-to-cost ratio (Goodrich and Tamassia, 2001). Therefore, there must exist a threshold, denoted µ, such that the optimal strategy is to bid on queries with a value-to-price ratio of at least µ. So if we let µa = 1/µ, then advertiser a bids on any query with µa·va(q) ≥ pa(q).

We prove this formally by contradiction. Assume to the contrary that there exist non-zero-measure sets X, Y ⊂ Q such that for all x ∈ X and y ∈ Y the fractional value of x is less than the fractional value of y, i.e., va(x)/pa(x) < va(y)/pa(y), and that in the optimal solution advertiser a gets all the queries in X and no query in Y. However, we show that by swapping queries in X with queries in Y of the same total price, the advertiser can still satisfy its budget constraint while increasing its value.

To prove this, fix 0 < α < min(∫_X pa(q)dq, ∫_Y pa(q)dq). Since the Lebesgue measure is atomless, there exist subsets X′ ⊆ X and Y′ ⊆ Y such that α = ∫_{X′} pa(q)dq = ∫_{Y′} pa(q)dq. Since the value per cost of the queries in Y is higher than that of the queries in X, by swapping the queries of X′ with Y′, the value of the new set increases while the cost does not change. Therefore, the initial solution cannot be optimal.

A similar argument holds for tCPA-constrained advertisers. Swapping queries in X′ with Y′ does not change the cost and increases the upper bound of the tCPA constraint, resulting in a feasible solution with a higher value. Therefore, the optimal bidding strategy under a tCPA constraint is also ba(q) as defined in the statement of the claim.
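In a discretized approximation, the knapsack argument gives a simple recipe: sort queries by value-to-price ratio and buy greedily until the budget binds; the ratio of the last accepted query implies the multiplier µa. A sketch of ours (hypothetical names, not the paper's code):

```python
import numpy as np

# Sketch (ours): discretized version of the threshold structure in Claim 4.3.
# The winning set ends up being { q : mu * v(q) >= p(q) } for the returned mu.

def threshold_multiplier(v, p, budget):
    v, p = np.asarray(v, float), np.asarray(p, float)
    order = np.argsort(-(v / p))    # highest value-per-cost first
    spent, mu = 0.0, 0.0
    for i in order:
        if spent + p[i] > budget:   # next query is no longer affordable
            break
        spent += p[i]
        mu = p[i] / v[i]            # inverse of the last accepted ratio
    return mu
```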
Step 2: Equilibrium Characterization

The previous step showed that the optimal bidding strategy is to bid on queries with a value-to-price ratio above a certain threshold. Thus, we need to track one variable per auto-bidder to find the subgame equilibrium.

In what follows, we focus on finding these variables when there are only two advertisers in the game. This characterization of equilibrium gives an implicit equation for the equilibrium bidding strategy, which makes the problem tractable in our continuous-query model.¹³

¹³Notice that for the discrete-query model, finding an equilibrium is PPAD-hard (Filos-Ratsikas et al., 2021).

From Claim 4.3 we observe that the ratio of the bidding multipliers is key to determining the set of queries that each advertiser wins. To map the space of queries to the bidding space, we introduce the function h(q) = v1(q)/v2(q). Hence, for high values of h, the probability that advertiser 1 wins the query increases. Also, notice that without loss of generality, we can reorder the queries on [0, 1] so that h is non-decreasing.

In what follows, we further assume that h is increasing on [0, 1]. This implies that h is invertible and also differentiable almost everywhere on [0, 1]. With these assumptions in place, we can now state the following lemma connecting the subgame equilibrium to the ratio of the advertisers' values.

Lemma 4.4 (Subgame Equilibrium in FPA). Given two budget-constrained auto-bidders with budgets B1 and B2, let µ1 and µ2 be as defined in Claim 4.3 for auto-bidding with FPA. Also assume that h(q) = v1(q)/v2(q) as defined above is strictly monotone. Then µ1 = B2 / E[z·1(z ≤ r)] and µ2 = µ1·r, where r is the solution of the following implicit equation:

   r·E[1(z ≥ r)] / E[z·1(z ≤ r)] = B1/B2.   (3)

Here, E[·] is defined as E[P(z)] = ∫_0^∞ P(z)f(z)dz, where f(z) = v2(h⁻¹(z)) / h′(h⁻¹(z)) wherever h′ is defined, and f is zero otherwise.

Also, for two tCPA auto-bidders with targets T1 and T2, we have µ1 = T2·E[1(z ≤ r)] / E[z·1(z ≤ r)] and µ2 = µ1·r, where r is the solution of the following implicit equation:

   ( r·E[1(z ≥ r)] / E[z·1(z ≥ r)] ) · ( E[1(z ≤ r)] / E[z·1(z ≤ r)] ) = T1/T2.   (4)

Remark 4.5. The function f intuitively represents the expected value of the queries that advertiser 2 can win as well as the density of the queries that advertiser 1 can win. Also, the variable r is the cut-off determining how the queries are divided between the two advertisers. In the proof, we will see that the advertisers' values at equilibrium are computed with respect to f: advertiser 1's overall value is ∫_r^∞ zf(z)dz and advertiser 2's overall value is ∫_0^r f(z)dz.

Proof. First, consider budget-constrained auto-bidders. Given Claim 4.3, in equilibrium the price of query q is min(µ1v1(q), µ2v2(q)). Therefore, the budget constraints become:

   B1 = ∫_0^1 µ2v2(q)·1(µ2v2(q) ≤ µ1v1(q)) dq,
   B2 = ∫_0^1 µ1v1(q)·1(µ2v2(q) ≥ µ1v1(q)) dq.

With a change of variable from q to z = h(q), and letting r = µ2/µ1, we have:

   B1 = ∫_r^∞ µ2v2(h⁻¹(z)) (dh⁻¹(z)/dz) dz,
   B2 = ∫_0^r µ1v1(h⁻¹(z)) (dh⁻¹(z)/dz) dz.

Observe that v1(h⁻¹(z)) = z·v2(h⁻¹(z)); then, if we let f(z) = v2(h⁻¹(z))·(h⁻¹)′(z) = v2(h⁻¹(z))/h′(h⁻¹(z)), the constraints become

   B1 = µ2 ∫_r^∞ f(z)dz,   (5)
   B2 = µ1 ∫_0^r zf(z)dz.   (6)

We obtain Equation (3) by dividing both sides of Equation (5) by the respective sides of Equation (6).

Now, consider two tCPA-constrained auto-bidders. Similarly to the budget-constrained case, we can write

   T1 ∫_0^1 v1(q)·1(µ2v2(q) ≤ µ1v1(q)) dq = ∫_0^1 µ2v2(q)·1(µ2v2(q) ≤ µ1v1(q)) dq,
   T2 ∫_0^1 v2(q)·1(µ2v2(q) ≥ µ1v1(q)) dq = ∫_0^1 µ1v1(q)·1(µ2v2(q) ≥ µ1v1(q)) dq.

The same change of variables leads to the following:

   (T1/T2) · ( ∫_r^∞ xf(x)dx / ∫_0^r f(x)dx ) = r · ( ∫_r^∞ f(x)dx / ∫_0^r xf(x)dx ).

This finishes the proof of the lemma.
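Lemma 4.4 reduces equilibrium computation to one-dimensional root finding. The sketch below is ours (f is any stand-in density, and truncating [0, ∞) at z_max is an assumption); it scans for solutions of the budget-case equation (3):

```python
import numpy as np

# Sketch (ours): find the r's solving (3),
#   r * E[1(z >= r)] / E[z 1(z <= r)] = B1 / B2,
# with E[P(z)] = integral of P(z) f(z) dz approximated on a truncated grid.

def equilibrium_r(f, B1, B2, z_max=50.0, n=200_000):
    z = np.linspace(1e-6, z_max, n)
    fz = f(z)
    def lhs(r):
        num = r * np.trapz(fz * (z >= r), z)   # r E[1(z >= r)]
        den = np.trapz(z * fz * (z <= r), z)   # E[z 1(z <= r)]
        return num / den
    grid = np.linspace(0.05, z_max / 2, 2_000)
    vals = np.array([lhs(r) for r in grid])
    sign = np.sign(vals - B1 / B2)
    return grid[np.where(np.diff(sign) != 0)[0]]   # all sign-change crossings
```

Because the left-hand side need not be monotone in r, the scan can return several candidate equilibria; this multiplicity is exactly what Step 3 below exploits.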
The previous lemma immediately implies that any instance of valuation functions that is non-AIC for budget-advertisers will be non-AIC for tCPA-advertisers as well.

Corollary 4.6. If auto-bidding with first price and two budget-advertisers is not AIC, then auto-bidding with the same set of queries and two tCPA-advertisers is also not AIC.

Proof. Recall that advertiser 1 wins all queries with h(q) ≥ r, so the value accrued by advertiser 1 is decreasing in r. Hence, an instance of auto-bidding with tCPA-constrained advertisers is not AIC for advertiser 1 exactly when the corresponding function

   r · ( ∫_r^∞ f(x)dx / ∫_0^r xf(x)dx ) · ( ∫_0^r f(x)dx / ∫_r^∞ xf(x)dx )

(the same as (4)) is increasing at some r′.

On the other hand, recall that r ∫_r^∞ f(x)dx / ∫_0^r xf(x)dx is the ratio for the budget-constrained bidders' equilibrium, as in (3). The additional multiplier in the equilibrium equation of tCPA-constrained advertisers in (4) is ∫_0^r f(x)dx / ∫_r^∞ xf(x)dx, which is increasing in r. So, if auto-bidding for budget-constrained bidders is not AIC, and hence the corresponding ratio is increasing at some r′, then the ratio for the tCPA-constrained advertisers is increasing there as well, which proves the claim.
Step 3: Designing a non-AIC instance

The characterization of equilibrium from Step 2 leads us to construct an instance where advertisers have an incentive to misreport their constraints. The idea behind the proof is that the value accrued by advertiser 1 is decreasing in r (as found in Lemma 4.4). Then, to find a counter-example, it is enough to find an instance of valuation functions such that the equilibrium equation (3) is non-monotone in r.

Proof of Theorem 4.1. We construct an instance with two budget-constrained advertisers. By Corollary 4.6, the same instance works for tCPA-constrained advertisers. To prove the theorem, we will find valuation functions v1 and v2 and budgets B1 and B2 such that the value accrued by advertiser 1 decreases when their budget increases.

Define g(r) = ∫_0^r xf(x)dx / ( r ∫_r^∞ f(x)dx ). By Lemma 4.4, one can find the equilibrium by solving the equation g(r) = B2/B1. Recall that advertiser 1 wins all queries with v1(q)/v2(q) ≥ r, so the total value of the queries accrued by advertiser 1 is decreasing in r. Hence, to construct a non-AIC example, it is enough to find a function f such that g is non-monotone in r.

A possible such non-monotone function g is

   g(r) = ((r − 1)³ + 3)/(cr) − 1,   (7)

where c is chosen such that min_{r≥0} g(r) = 0, i.e., c = min_r ((r − 1)³ + 3)/r ≈ 1.95105. To see why g is non-monotone, observe that g(r) is decreasing for r ≤ 1.8, because g′(r) = (2r³ − 3r² − 2)/(cr²) is negative for r ≤ 1.8, and then increasing for r ≥ 1.81.

We claim that the function f defined as

   f(r) = 3c(r − 1)² e^{∫_0^r c/((x−1)³+3) dx} / ((r − 1)³ + 3)²   (8)

results in the function g in (7). To see why this claim is enough to finish the proof, note that there are many ways to choose the value functions of the advertisers to derive f as in (8). One possible way is to define v1, v2 : [0, 1] → R as v2(q) = f(tan(q))/(tan(q)² + 1) and v1(q) = tan(q)·v2(q) (see Fig. 2).
726
+ (a) Valuation function of two advertisers.
727
+ (b) Finding the equilibrium using (3).
728
+ Figure 2: An example of two advertisers such that FPA is not AIC (proof of Theorem 4.1). When
729
+ B1
730
+ B2 = 1200, there are three values for r (see the right panel) that lead to equilibrium, and one
731
+ (orange) leads to non-AIC equilibrium.
+ So it remains to prove that choosing f as in (8) would result in g as defined in (7). To derive f
+ from g, first we simplify g using integration by parts,
+ $g(r) = \frac{\int_0^r x f(x)\,dx}{r \int_r^\infty f(x)\,dx} = \frac{r \int_0^r f(x)\,dx - \int_0^r \int_0^x f(y)\,dy\,dx}{r \int_r^\infty f(x)\,dx} = \frac{r \int_0^\infty f(x)\,dx - \int_0^r \int_0^x f(y)\,dy\,dx}{r \int_r^\infty f(x)\,dx} - 1.$
+ Assuming that $\int_0^\infty f(x)\,dx$ is finite, the above equations lead to the following:
+ $rg(r) + r = \frac{\int_0^r \int_x^\infty f(y)\,dy\,dx}{\int_r^\infty f(x)\,dx}.$   (9)
+ Therefore, by integrating the reciprocal of both sides,
+ $\log\left( \int_0^r \int_x^\infty f(y)\,dy\,dx \right) = C + \int_0^r \frac{1}{xg(x) + x}\,dx,$
+ and by exponentiating,
+ $\int_0^r \int_x^\infty f(y)\,dy\,dx = K e^{\int_0^r \frac{1}{xg(x) + x}\,dx},$
+ for some constants C and K > 0. Then, by differentiating both sides with respect to r,
+ $\int_r^\infty f(x)\,dx = \frac{K}{rg(r) + r}\, e^{\int_0^r \frac{1}{xg(x) + x}\,dx}.$
+ Note that for any choice of K > 0, dividing the last two equations will result in (9). So, without loss
+ of generality, we can assume K = 1. By differentiating again, we can derive f as a function of g:
+ $f(r) = \frac{g'(r)r + g(r)}{(rg(r) + r)^2}\, e^{\int_0^r \frac{1}{xg(x) + x}\,dx}.$
+ [Figure 2 here: panel (a) plots the valuation functions v1(q) and v2(q) against q; panel (b) plots the ratio $rE[\mathbf{1}(z \ge r)] / E[z\mathbf{1}(z \le r)]$ against r.]
+ We need g'(r)r + g(r) ≥ 0 to ensure that f(r) ≥ 0 for all r. This holds for g as in (7). Finally, by
+ substituting g as in (7), we derive f as in (8).
+ Remark 4.7. Note that the above proof shows that for values of r for which there exists an equilibrium
+ that is not AIC, there always exists a second, monotone equilibrium. This follows from the fact that
+ the function g(r) tends to infinity as r → ∞, so g must be increasing for some large enough r.
+ Before moving on to finding conditions for incentive compatibility, we also note that the above
+ characterization implies the existence of an equilibrium for auto-bidding with any pair of advertisers.
+ Proposition 4.8. Given auto-bidding satisfying the conditions of Lemma 4.4, the equilibrium for
+ any pair of budget-constrained or any pair of tCPA-constrained advertisers always exists.
+ Proof. Recall that the equilibrium exists if the equation
+ $\frac{B_2}{B_1} = \frac{\int_0^r x f(x)\,dx}{r \int_r^\infty f(x)\,dx}$
+ has a solution r for any value of $B_2/B_1$. Note that the right-hand side is positive for any
+ r > 0, and it continuously grows to infinity as r → ∞. So, to make sure that every value of
+ $B_2/B_1$ is covered, we need to check that the ratio becomes zero at r = 0. By L'Hôpital's rule,
+ $\lim_{z \to 0} \frac{z f(z)}{\int_z^\infty f(x)\,dx - z f(z)} = 0$, as desired.
+ For tCPA-constrained advertisers, the second ratio $\frac{r \int_0^r f(x)\,dx}{\int_r^\infty x f(x)\,dx}$ always converges to 0, so the
+ equilibrium in this case always exists.
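+ For illustration, the following sketch (our own construction; the exponential f is an arbitrary illustrative choice) solves the equilibrium condition $g(r) = B_2/B_1$ of Lemma 4.4 numerically by bisection. When g is non-monotone, bisection returns only one of possibly several equilibria:
+ import numpy as np
+ from scipy.integrate import quad
+ 
+ def eq_ratio(r, f, upper=200.0):
+     # g(r) = int_0^r x f(x) dx / (r * int_r^inf f(x) dx), tail truncated at `upper`
+     num = quad(lambda x: x * f(x), 0.0, r)[0]
+     den = r * quad(f, r, upper)[0]
+     return num / den
+ 
+ def solve_r(f, budget_ratio, lo=1e-6, hi=100.0, tol=1e-9):
+     # the ratio is 0 at r -> 0 and tends to infinity, so a crossing exists
+     while hi - lo > tol:
+         mid = 0.5 * (lo + hi)
+         if eq_ratio(mid, f) < budget_ratio:
+             lo = mid
+         else:
+             hi = mid
+     return 0.5 * (lo + hi)
+ 
+ f = lambda x: np.exp(-x)                    # an illustrative, integrable f
+ print(solve_r(f, budget_ratio=2.0))         # an r solving g(r) = B2/B1 = 2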
+ 4.2 Sufficient Conditions for Incentive Compatibility
+ In this section we show that the lack of AIC happens in cases where advertisers' valuations have
+ unusual properties. More precisely, the main result of the section is to characterize sufficient
+ conditions on the advertisers' valuations so that FPA is AIC when there are two advertisers in the
+ auction.
+ For this goal, we recall the function $f(z) = \frac{v_2(h^{-1}(z))}{h'(h^{-1}(z))}$, where $h(q) = \frac{v_1(q)}{v_2(q)}$, defined in Section 4.1.
+ As shown in Lemma 4.4, the function f behaves both as the value of the queries that advertiser 2 gets and as the density
+ of the queries that advertiser 1 gets.
+ Lemma 4.9. Consider two advertisers that are either both budget-advertisers
+ or both tCPA-advertisers. Also, suppose that the auto-bidder with FPA uses the optimal bidding strategy in
+ Claim 4.3. Then a sufficient condition for FPA to be AIC is that f has a monotone hazard rate, i.e.,
+ $\frac{f(r)}{\int_r^\infty f(x)\,dx}$ is non-decreasing in r.
+ Proof. Following the proof of Theorem 4.1, if $g(r) = \frac{\int_0^r \int_x^\infty f(y)\,dy\,dx}{r \int_r^\infty f(x)\,dx}$ is non-decreasing in r, then the
+ equilibrium is AIC. The equivalent sufficient condition, obtained by imposing the inequality g'(r) ≥ 0,
+ is that for all r ≥ 0,
+ $r \left( \int_r^\infty f(x)\,dx \right)^2 \ge \left( \int_0^r \int_x^\infty f(y)\,dy\,dx \right) \left( \int_r^\infty f(x)\,dx - r f(r) \right).$   (10)
+ If $\int_r^\infty f(x)\,dx \le r f(r)$, then the above inequality obviously holds. So, we can assume that for some
+ r > 0, $\int_r^\infty f(x)\,dx > r f(r)$. Since $\frac{f(z)}{\int_z^\infty f(x)\,dx}$ is non-decreasing in z, we must have that for all r' ≤ r,
+ $\frac{f(r')}{\int_{r'}^\infty f(x)\,dx} \le \frac{f(r)}{\int_r^\infty f(x)\,dx} \le \frac{1}{r} \le \frac{1}{r'}$. On the other hand, by taking the derivative of $\frac{f(z)}{\int_z^\infty f(x)\,dx}$, we must
+ have that $f'(z) \int_z^\infty f(x)\,dx + f(z)^2 \ge 0$. By considering two cases on the sign of f', for z ≤ r we
+ must have $f(z)\left( z f'(z) + f(z) \right) \ge 0$, and hence $(z f(z))' \ge 0$ for all z ≤ r. Therefore, z f(z) is
+ non-decreasing for z ≤ r.
+ On the other hand,
+ $\int_0^r \int_x^\infty f(y)\,dy\,dx = \int_0^r \int_x^r f(y)\,dy\,dx + \int_0^r \int_r^\infty f(y)\,dy\,dx$
+ $= r \int_0^r f(x)\,dx - \int_0^r \int_0^x f(y)\,dy\,dx + r \int_r^\infty f(x)\,dx$
+ $= \int_0^r x f(x)\,dx + r \int_r^\infty f(x)\,dx,$
+ where the second equality is by integration by parts. Then, by applying the monotonicity of z f(z) for
+ z ≤ r, we have $\int_0^r \int_x^\infty f(y)\,dy\,dx \le r^2 f(r) + r \int_r^\infty f(x)\,dx$. So, to prove (10) it is enough to show that
+ $\left( \int_r^\infty f(x)\,dx \right)^2 \ge \left( r f(r) + \int_r^\infty f(x)\,dx \right) \left( \int_r^\infty f(x)\,dx - r f(r) \right),$
+ which holds since the right-hand side equals $\left( \int_r^\infty f(x)\,dx \right)^2 - (r f(r))^2$, which is at most the
+ left-hand side.
+ While the condition on f has intuitive properties when f is seen as a density, it has the unappealing
+ property of being too abstract in terms of conditions on the advertisers' valuations. The following
+ result provides sufficient conditions on the value functions v1 and v2 that make f have a monotone hazard
+ rate and, hence, FPA AIC.
+ Theorem 4.10. Consider two advertisers that are either both budget-advertisers or both tCPA-advertisers.
+ Assume that $h(q) = \frac{v_1(q)}{v_2(q)}$ is an increasing concave function and that v2 is non-decreasing. Then, the
+ equilibrium in FPA auto-bidding with the bidding strategy as in Claim 4.3 is AIC.
+ Proof. Note that when f is non-decreasing, it also has a monotone hazard rate. Now, when h
+ is concave, 1/h' is a non-decreasing function, and since v2 is also non-decreasing, f is also
+ non-decreasing.
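+ The following small numerical check (ours) illustrates Theorem 4.10 on a concrete example: with $h(q) = \sqrt{q}$ increasing and concave and $v_2(q) = 1 + q$ non-decreasing, the induced f is non-decreasing and hence has a monotone hazard rate:
+ import numpy as np
+ 
+ q = np.linspace(1e-4, 1.0, 10_000)
+ v2 = 1.0 + q                  # non-decreasing valuation of advertiser 2
+ h_prime = 0.5 / np.sqrt(q)    # derivative of h(q) = sqrt(q), which is concave
+ f = v2 / h_prime              # f as a function of z = h(q); z is increasing in q
+ print("f non-decreasing:", bool(np.all(np.diff(f) >= -1e-12)))   # True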
+ 4.3 FPA with Uniform Bidding
+ The previous section shows that when auto-bidders have full flexibility in the bidding strategy,
+ FPA is not AIC. However, non-uniform bidding is not simple to implement, and auto-bidders may be
+ constrained to use simpler uniform bidding policies (aka pacing bidding). In this context, the main
+ result of the section is Theorem 4.2, which shows that when restricted to uniform bidding policies, FPA
+ is AIC. Note that here we are assuming a simple model where advertisers do not split campaigns.
+ So, FPA with uniform bidding is AIC, but it could bring up other incentives for advertisers when it
+ is implemented.
+ Definition 4.11 (Uniform bidding equilibrium). A uniform bidding equilibrium for the auto-bidders'
+ subgame corresponds to bid multipliers µ1, . . . , µN such that every auto-bidder a chooses the uniform
+ bidding policy µa that maximizes Problem (1) when restricted to uniform bidding policies, with the
+ requirement that if advertiser a's constraints of type (2) are not tight, then µa takes its maximum possible
+ value.[14]
+ The proof of Theorem 4.2 is based on the main results of Conitzer et al. (2022a). The authors
+ proved that the uniform-bidding equilibrium is unique and that, in equilibrium, the multiplier of each
+ advertiser is the maximum multiplier over all feasible uniform bidding strategies. Their result is
+ for budget-constrained advertisers, and we extend it to include tCPA-constrained advertisers. The
+ proof is deferred to Appendix B.
+ Lemma 4.12 (Extension of Theorem 1 in Conitzer et al. (2022a)). Given an instance of auto-
+ bidding with general constraints as in (2), there is a unique uniform bidding equilibrium, and the bid
+ multipliers of all advertisers are maximal among all feasible uniform bidding profiles.
+ Now, we are ready to prove Theorem 4.2.
+ Proof of Theorem 4.2. Assume that advertiser 1 increases their budget or their target CPA. Then the
+ original uniform bidding profile is still feasible for all advertisers. Further, by Lemma 4.12, the equilibrium
+ pacing of all advertisers is maximal among all feasible pacings. So, the pacing of all advertisers
+ will either increase or remain the same. But the constraints of all advertisers except advertiser 1 are either
+ binding, or their multiplier has attained its maximum value by the definition of the pacing equilibrium.
+ Therefore, the set of queries they end up with must be a subset of their original ones, since the
+ price of every query will either increase or remain the same. So, it is only advertiser 1 that can win
+ more queries.
+ Remark 4.13. Conitzer et al. (2022a) show monotonicity properties of budgets in the FPA uniform-
+ bidding equilibrium for revenue and welfare. Instead, in our work we focus on monotonicity for
+ each advertiser.
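+ As a toy illustration (ours) of the maximal-pacing characterization, the following sketch runs a shrink-only adjustment from above, in the spirit of Conitzer et al. (2022a), on a small discrete FPA instance with two budget-advertisers. The values and budgets are arbitrary, and convergence is not claimed in general:
+ import numpy as np
+ 
+ v = np.array([[3.0, 2.0, 1.0],    # advertiser 1's values per query (illustrative)
+               [1.0, 1.5, 2.0]])   # advertiser 2's values per query (illustrative)
+ B = np.array([2.5, 2.0])          # budgets (illustrative)
+ mu = np.array([10.0, 10.0])       # start above any feasible pace
+ 
+ for _ in range(200):
+     bids = mu[:, None] * v
+     win = bids.argmax(axis=0)               # FPA: the highest paced bid wins
+     for a in range(2):
+         spend = bids[a, win == a].sum()     # FPA cost equals the own bid
+         if spend > B[a]:
+             mu[a] *= B[a] / spend           # shrink just enough to restore feasibility
+ print(mu, win)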
+ 5 Truthful Auctions
+ This section studies auto-bidding incentive compatibility for the case where the per-query auction is
+ a truthful auction.
+ A truthful auction is an auction where the optimal bidding strategy for a profit-maximizing
+ agent is to bid its value. An important example of a truthful auction is the second-price auction. As we
+ showed in the three-queries example of the introduction, SPA is not AIC. In this section, we show
+ that the previous example generalizes, in our continuous-query model, to any (randomized) truthful
+ auction so long as the auction is scalar invariant and symmetric (see Assumption 5.1 below for
+ details). As part of our proof technique, we obtain an auction equivalence result which is interesting
+ on its own: in the continuous-query model, SPA and FPA have the same outcome.[15]
+ For the remainder of the section, we assume all truthful auctions satisfy the following property.
+ [14] When valuations are strictly positive for all queries q ∈ [0, 1], we can easily show that bid multipliers have to be
+ bounded in equilibrium. When this is not the case, we set a cap sufficiently high to avoid bid multipliers going to
+ infinity.
+ [15] It is well-known that in the discrete-query model, FPA and SPA are not auction equivalent in the presence of
+ auto-bidders.
+ Assumption 5.1. Let $(x_a(b))_{a \in A}$ be the allocation rule in a truthful auction given bids $b = (b_a)_{a \in A}$.
+ We assume that the allocation rule satisfies the following properties.
+ 1. The auction always allocates: $\sum_{a \in A} x_a(b) = 1$.
+ 2. Scalar invariance: For any constant c > 0 and any advertiser a ∈ A, $x_a(b) = x_a(cb)$.
+ 3. Symmetry: For any pair of advertisers a, a' ∈ A and bids b, b', with $b_{-\{a,a'\}}$ denoting the remaining bids, we have
+ that $x_a(b_a = b,\, b_{a'} = b',\, b_{-\{a,a'\}}) = x_{a'}(b_a = b',\, b_{a'} = b,\, b_{-\{a,a'\}})$.
+ Remark 5.2. Observe that SPA satisfies Assumption 5.1.
+ From the seminal result of Myerson (1981), we obtain a tractable characterization of truthful
+ auctions which we use in our proof.
+ Lemma 5.3 (Truthful auctions (Myerson, 1981)). Let $(x_a(b), p_a(b))_{a \in A}$ be the allocation and pricing
+ rules for an auction given bids $b = (b_a)_{a \in A}$. The auction rule is truthful if and only if
+ 1. The allocation rule is non-decreasing in the bid: for each bidder a ∈ A and any $b'_a \ge b_a$, we have
+ that $x_a(b'_a, b_{-a}) \ge x_a(b_a, b_{-a})$.
+ 2. Pricing follows Myerson's formula:
+ $p_a(b) = b_a \cdot x_a(b) - \int_0^{b_a} x_a(z, b_{-a})\,dz.$
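+ As a small illustration (ours) of Lemma 5.3, the following sketch computes the truthful payment induced by a monotone allocation rule; the rule x(b) = b/(1 + b) is purely illustrative:
+ import numpy as np
+ from scipy.integrate import quad
+ 
+ def myerson_price(x, b):
+     # p(b) = b * x(b) - int_0^b x(z) dz, with other bids held fixed
+     return b * x(b) - quad(x, 0.0, b)[0]
+ 
+ x = lambda b: b / (1.0 + b)        # an illustrative monotone allocation
+ print(myerson_price(x, 2.0))       # 2*(2/3) - (2 - log 3) ~ 0.432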
+ A second appealing property of truthful auctions is that the optimal bidding strategy for auto-
+ bidders is simpler: in the discrete-query model, a uniform bidding strategy is almost optimal and can
+ differ from optimal by at most the value of two queries (Aggarwal et al., 2019). We revisit this result
+ in our continuous-query model and show that a uniform bidding policy is optimal for truthful auctions.
+ Claim 5.4. In the continuous-query model, if the per-query auction is truthful, then using uniform
+ bidding is an optimal strategy for each auto-bidder.
+ Proof. We use Theorem 1 of Aggarwal et al. (2019). Pick some small δ > 0 and divide the interval
+ [0, 1] into subintervals of length δ. Let each subinterval I be a discrete query with value functions
+ $v_j(I) = \int_I v_j(q)\,dq$. Then Theorem 1 of Aggarwal et al. (2019) implies that uniform bidding differs from
+ optimal by at most two queries. So, the difference from optimal is bounded by $2 \max_j \max_{|I| \le \delta} v_j(I)$.
+ Now, since the valuation functions are atomless (i.e., the value of a single query is of order dq), by letting δ go to 0,
+ the error of uniform bidding in the continuous case also goes to zero.
+ 5.1 SPA in the Continuous-Query Model
+ We generalize the discrete example of the second-price auction in Theorem 2.1 to the continuous set
+ of queries model, showing that SPA is not AIC. The key step consists of showing that in the
+ continuous-query model there is an auction equivalence result between the first- and second-price auctions.
+ Theorem 5.5 (Auction Equivalence Result). Suppose that auto-bidders use a uniform bidding strategy
+ for SPA and, similarly, use the simple bidding strategy defined in Claim 4.3 for FPA. Then, in any
+ subgame equilibrium, the outcome of the auction (allocations and pricing) in SPA is the same as in
+ FPA.
+ This result immediately implies that all the results for FPA in Section 4 hold for SPA as well.
+ Theorem 5.6. Suppose that there are at least two budget-advertisers or two tCPA-advertisers. Then,
+ even in the continuous-query model, SPA is not AIC.
+ Similarly to the FPA case, we can characterize the equilibrium for the two-advertiser case and derive
+ sufficient conditions on advertisers' valuation functions so that SPA is AIC.
+ Theorem 5.7. Given two advertisers, let µ1 and µ2 be the bidding multipliers in equilibrium for the
+ subgame of the auto-bidders. Also assume that $h(q) = \frac{v_1(q)}{v_2(q)}$ is increasing. Then
+ 1. If the advertisers are budget-constrained with budgets B1 and B2, then $\mu_1 = \frac{B_2}{E[z\,\mathbf{1}(z \le r)]}$ and
+ $\mu_2 = \mu_1 r$, where r is the solution of the following implicit equation:
+ $\frac{r\,E[\mathbf{1}(z \ge r)]}{E[z\,\mathbf{1}(z \le r)]} = \frac{B_1}{B_2}.$
+ Here, E[·] is defined as $E[P(z)] = \int_0^\infty P(z) f(z)\,dz$, where $f(z) = \frac{v_2(h^{-1}(z))}{h'(h^{-1}(z))}$ wherever h' is
+ defined, and it is zero otherwise.
+ 2. If the advertisers are tCPA-constrained with targets T1 and T2, we have $\mu_1 = \frac{T_2\,E[\mathbf{1}(z \le r)]}{E[z\,\mathbf{1}(z \le r)]}$
+ and $\mu_2 = \mu_1 r$, where r is the solution of the following implicit equation:
+ $\frac{r\,E[\mathbf{1}(z \ge r)]}{E[z\,\mathbf{1}(z \ge r)]} \cdot \frac{E[\mathbf{1}(z \le r)]}{E[z\,\mathbf{1}(z \le r)]} = \frac{T_1}{T_2}.$
+ 3. If, further, v2 is non-decreasing in q, h is concave, and the advertisers are either both budget-
+ constrained or both tCPA-constrained, then SPA is AIC.
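+ For concreteness, the following sketch (ours; the exponential f is an arbitrary illustrative choice) solves the implicit equation of part 1 numerically and recovers the equilibrium multipliers:
+ import numpy as np
+ from scipy.integrate import quad
+ from scipy.optimize import brentq
+ 
+ f = lambda z: np.exp(-z)                          # illustrative choice of f
+ 
+ def lhs(r):
+     num = r * quad(f, r, 100.0)[0]                # r * E[1(z >= r)]
+     den = quad(lambda z: z * f(z), 0.0, r)[0]     # E[z 1(z <= r)]
+     return num / den
+ 
+ B1, B2 = 3.0, 1.0
+ r = brentq(lambda x: lhs(x) - B1 / B2, 1e-6, 50.0)
+ mu1 = B2 / quad(lambda z: z * f(z), 0.0, r)[0]
+ mu2 = mu1 * r
+ print(r, mu1, mu2)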
+ We now demonstrate the auction equivalence between FPA and SPA.
+ Proof of Theorem 5.5. Note that, by Claim 5.4, the optimal strategy for the second-price auction is uniform bidding
+ with respect to the true value of the query. Also, Claim 4.3 implies that in the continuous model the cost incurred
+ by each advertiser in the first-price auction depends on the pacing multiplier
+ of the other advertiser. This immediately suggests the equivalence between the optimal bidding
+ strategies of the first- and second-price auctions. So, the optimal strategy for both auctions will be the
+ same, and therefore the resulting allocation and pricing will also be the same. Hence, it follows that
+ the same allocation and pricing will be a pure equilibrium under both auctions.
+ 5.2 Truthful Auctions Beyond Second-Price
+ We now present the main result of the section. We show that a general truthful auction (with
+ possibly random allocation) is not AIC.
+ Theorem 5.8. Consider a truthful auction (x, p) satisfying Assumption 5.1. If there are at least
+ two budget-advertisers or two tCPA-advertisers, then the truthful auction is not AIC.
+ The remainder of the section gives an overview of the proof of this theorem. Similar to the
+ FPA and SPA cases, we start by characterizing the equilibrium in the continuous case when there
+ are two advertisers in the game. The proof relies on the observation that for auctions satisfying
+ Assumption 5.1, the allocation probability is a function of the ratio of the bids. So, again as for FPA
+ and SPA, finding the equilibrium reduces to finding the ratio of the bidding multipliers. Then, to finish
+ the proof of Theorem 5.8, instead of providing an explicit example where auto-bidding is non-AIC, we
+ show that the conditions an AIC auction's allocation probability would need to satisfy are impossible.
+ The following theorem finds an implicit equation for the best response. We omit the proofs of
+ the intermediate steps and defer them to Appendix C.
+ Theorem 5.9. Consider a truthful auction (x, p) satisfying Assumption 5.1 and assume that there
+ are either two budget-advertisers or two tCPA-advertisers. Let µ1 and µ2 be the bidding multipliers
+ used by the auto-bidders in the subgame equilibrium. Further, assume that $h(q) = \frac{v_1(q)}{v_2(q)}$ is increasing.
+ Then
+ 1. If the advertisers are budget-constrained with budgets B1 and B2, then $\mu_1 = \frac{B_1}{E[r\,p_1(\frac{z}{r}, 1)]}$ and
+ $\mu_2 = r\mu_1$, where r is the solution of the following implicit equation:
+ $\frac{E[r\,p_1(\frac{z}{r}, 1)]}{E[z\,p_1(\frac{r}{z}, 1)]} = \frac{B_1}{B_2}.$
+ Here, E[·] is defined as $E[P(z)] = \int_0^\infty P(z) f(z)\,dz$, where $f(z) = \frac{v_2(h^{-1}(z))}{h'(h^{-1}(z))}$ wherever h' is
+ defined, and it is zero otherwise.
+ 2. If the advertisers are tCPA-constrained with targets T1 and T2, we have $\mu_1 = \frac{T_1\,E[z\,x_1(\frac{z}{r}, 1)]}{E[r\,p_1(\frac{z}{r}, 1)]}$ and
+ $\mu_2 = \mu_1 r$, where r is the solution of the following implicit equation:
+ $\frac{E[x_1(\frac{r}{z}, 1)]}{E[z\,x_1(\frac{z}{r}, 1)]} \cdot \frac{E[r\,p_1(\frac{z}{r}, 1)]}{E[z\,p_1(\frac{r}{z}, 1)]} = \frac{T_1}{T_2}.$
+ Because the allocation probability x1 is a non-decreasing function, we can derive a result similar to
+ the FPA case and show that if an instance is not AIC for budget-advertisers, then it is also not AIC for
+ tCPA-advertisers.
+ Proposition 5.10. If, for the case of two budget-constrained advertisers, the truthful auction is not
+ AIC, then for the case of tCPA-constrained advertisers the same auction is also not AIC.
+ Using the previous results, we are in a position to tackle the main theorem.
+ Proof of Theorem 5.8. We prove Theorem 5.8 for budget-constrained advertisers, since Proposition 5.10
+ then yields it for tCPA-constrained advertisers. We use the implicit function theorem to find
+ conditions on p1 and f that imply monotonicity in r. Let
+ $H(x, r) = \frac{\int_0^\infty r f(z)\, p_1(z/r, 1)\,dz}{\int_0^\infty f(z)\, z\, p_1(r/z, 1)\,dz} - x.$
+ Then, when advertiser 1 increases their budget, the corresponding variable x increases. So, if we want to
+ check whether r is a non-decreasing function of x, we need dr/dx to be non-negative. By the implicit
+ function theorem,
+ $\frac{dr}{dx} = -\frac{\partial H / \partial x}{\partial H / \partial r} = \frac{1}{\partial H / \partial r}.$
+ So, assume to the contrary that r is always non-decreasing in x; then $\frac{\partial H(x, r)}{\partial r} \ge 0$. Define
+ $p(x) = p_1(x, 1)$. Then we have the following:
+ $E\!\left[\tfrac{d}{dr}\!\left(r\,p(z/r)\right)\right] E[z\,p(r/z)] \ge E[r\,p(z/r)]\, E\!\left[\tfrac{d}{dr}\!\left(z\,p(r/z)\right)\right].$
+ Then
+ $\frac{\frac{d}{dr} E[r\,p(z/r)]}{E[r\,p(z/r)]} \ge \frac{\frac{d}{dr} E[z\,p(r/z)]}{E[z\,p(r/z)]}.$
+ By integrating both sides, we have that for any choice of f,
+ $r\,E[p(z/r)] \ge E[z\,p(r/z)].$
+ When the above inequality holds for any choice of v1 and v2, we claim that the following must hold
+ almost everywhere:
+ $p(b) \ge b\,p(1/b).$   (11)
+ To see this, assume to the contrary that there exists a measurable set B on which (11) does not hold.
+ Let $q\,v_2(q) = v_1(q)$; then $f(z) = v_2(z)$ can be any measurable function. So, we can define
+ f to have zero value everywhere except on B, and weight 1 over B, to get a contradiction.
+ By the substitution y = 1/b in (11), we also get $p(1/b) \ge p(b)/b$. Therefore, almost everywhere,
+ $p(b) = b\,p(1/b)$. By differentiating, we have $p'(b) = p(1/b) - p'(1/b)/b$. On the other hand, as we will
+ see in Appendix C, for any truthful auction satisfying Assumption 5.1, $p'(b) = p'(1/b)$. Therefore,
+ $p(b) = p'(b)(b + 1)$. Solving this for p, we get that the only possible AIC pricing must be of the form
+ $p(b) = \alpha(b + 1)$ for some α > 0.
+ Next, we show that there is no proper allocation probability satisfying Assumption 5.1 that
+ would result in such a pricing function p. It is not hard to see that, by Myerson's pricing formula,
+ $\frac{dx_1(b, 1)}{db} = \frac{p'(b)}{b}$. Therefore, we must have $x_1'(b, 1) = \alpha/b$, so $x_1(b, 1) = \alpha \log(b) + d$ for some constants
+ α > 0 and d. But x1 cannot be a valid allocation rule, since it would take negative values for small
+ enough b.
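+ The last two steps can also be checked symbolically; the following sketch (ours) verifies that the pricing $p(b) = \alpha(b+1)$ forces $x_1(b, 1) = \alpha \log(b) + d$, which escapes [0, 1] as b → 0:
+ import sympy as sp
+ 
+ b, alpha, d = sp.symbols("b alpha d", positive=True)
+ p = alpha * (b + 1)
+ # Myerson's identity p'(b) = b * x'(b) gives x'(b) = p'(b) / b
+ x = sp.integrate(sp.diff(p, b) / b, b) + d
+ print(x)                              # alpha*log(b) + d
+ print(sp.limit(x, b, 0, dir="+"))     # -oo: x cannot stay in [0, 1]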
+ References
+ Gagan Aggarwal, Ashwinkumar Badanidiyuru Varadaraja, and Aranyak Mehta. 2019. Autobidding with Constraints. In Web and Internet Economics 2019.
+ Amine Allouah and Omar Besbes. 2020. Prior-independent optimal auctions. Management Science 66, 10 (2020), 4417–4432.
+ Santiago Balseiro, Yuan Deng, Jieming Mao, Vahab Mirrokni, and Song Zuo. 2021a. Robust Auction Design in the Auto-bidding World. In Advances in Neural Information Processing Systems, M. Ranzato, A. Beygelzimer, Y. Dauphin, P.S. Liang, and J. Wortman Vaughan (Eds.), Vol. 34. Curran Associates, Inc., 17777–17788. https://proceedings.neurips.cc/paper/2021/file/948f847055c6bf156997ce9fb59919be-Paper.pdf
+ Santiago Balseiro, Yuan Deng, Jieming Mao, Vahab Mirrokni, and Song Zuo. 2021b. Robust Auction Design in the Auto-bidding World. Advances in Neural Information Processing Systems 34 (2021), 17777–17788.
+ Santiago R. Balseiro, Yuan Deng, Jieming Mao, Vahab Mirrokni, and Song Zuo. 2022. Optimal Mechanisms for Value Maximizers with Budget Constraints via Target Clipping. In Proceedings of the 23rd ACM Conference on Economics and Computation (Boulder, CO, USA) (EC '22). Association for Computing Machinery, New York, NY, USA, 475. https://doi.org/10.1145/3490486.3538333
+ Santiago R. Balseiro, Yuan Deng, Jieming Mao, Vahab S. Mirrokni, and Song Zuo. 2021c. The landscape of auto-bidding auctions: Value versus utility maximization. In Proceedings of the 22nd ACM Conference on Economics and Computation. 132–133.
+ Santiago R. Balseiro and Yonatan Gur. 2019. Learning in repeated auctions with budgets: Regret minimization and equilibrium. Management Science 65, 9 (2019), 3952–3968.
+ Xi Chen, Christian Kroer, and Rachitesh Kumar. 2021. The Complexity of Pacing for Second-Price Auctions. In Proceedings of the 22nd ACM Conference on Economics and Computation (Budapest, Hungary) (EC '21). Association for Computing Machinery, New York, NY, USA, 318.
+ Vincent Conitzer, Christian Kroer, Debmalya Panigrahi, Okke Schrijvers, Nicolas E. Stier-Moses, Eric Sodomka, and Christopher A. Wilkens. 2022a. Pacing Equilibrium in First Price Auction Markets. Management Science (2022).
+ Vincent Conitzer, Christian Kroer, Eric Sodomka, and Nicolas E. Stier-Moses. 2022b. Multiplicative pacing equilibria in auction markets. Operations Research 70, 2 (2022), 963–989.
+ Yuan Deng, Jieming Mao, Vahab Mirrokni, and Song Zuo. 2021a. Towards Efficient Auctions in an Auto-Bidding World. In Proceedings of the Web Conference 2021 (Ljubljana, Slovenia) (WWW '21). Association for Computing Machinery, New York, NY, USA, 3965–3973. https://doi.org/10.1145/3442381.3450052
+ Yuan Deng, Jieming Mao, Vahab Mirrokni, and Song Zuo. 2021b. Towards efficient auctions in an auto-bidding world. In Proceedings of the Web Conference 2021. 3965–3973.
+ Aris Filos-Ratsikas, Yiannis Giannakopoulos, Alexandros Hollender, Philip Lazos, and Diogo Poças. 2021. On the complexity of equilibrium computation in first-price auctions. In Proceedings of the 22nd ACM Conference on Economics and Computation. 454–476.
+ Jason Gaitonde, Yingkai Li, Bar Light, Brendan Lucier, and Aleksandrs Slivkins. 2022. Budget Pacing in Repeated Auctions: Regret and Efficiency without Convergence. arXiv preprint arXiv:2205.08674 (2022).
+ Negin Golrezaei, Patrick Jaillet, Jason Cheuk Nam Liang, and Vahab Mirrokni. 2021a. Bidding and pricing in budget and ROI constrained markets. arXiv preprint arXiv:2107.07725 (2021).
+ Negin Golrezaei, Ilan Lobel, and Renato Paes Leme. 2021b. Auction Design for ROI-Constrained Buyers. In Proceedings of the Web Conference 2021 (WWW '21). 3941–3952.
+ Michael T. Goodrich and Roberto Tamassia. 2001. Algorithm design: foundations, analysis, and internet examples. The fractional knapsack problem. John Wiley & Sons.
+ Juncheng Li and Pingzhong Tang. 2022. Auto-bidding Equilibrium in ROI-Constrained Online Advertising Markets. arXiv preprint arXiv:2210.06107 (2022).
+ Christopher Liaw, Aranyak Mehta, and Andres Perlroth. 2022. Efficiency of non-truthful auctions under auto-bidding. https://doi.org/10.48550/ARXIV.2207.03630
+ Aranyak Mehta. 2022. Auction Design in an Auto-Bidding Setting: Randomization Improves Efficiency Beyond VCG. In Proceedings of the ACM Web Conference 2022 (Virtual Event, Lyon, France) (WWW '22). Association for Computing Machinery, New York, NY, USA, 173–181. https://doi.org/10.1145/3485447.3512062
+ Aranyak Mehta and Andres Perlroth. 2023. Auctions without commitment in the auto-bidding world. https://doi.org/10.48550/ARXIV.2301.07312
+ Roger B. Myerson. 1981. Optimal Auction Design. Mathematics of Operations Research 6, 1 (1981), 58–73. https://doi.org/10.1287/moor.6.1.58
+ A Second-price tCPA constrained
+ Proof. We continue with the proof of Theorem 2.1. We first prove the uniqueness of the equilibrium in
+ the case of $B'_1 = 1$. First, note that there is no equilibrium such that advertiser 1 wins all the queries.
+ To see this, note that the multiplier of advertiser 1 is at most 1. Hence, the price of q3 for advertiser
+ 2 is within their budget, and they have the incentive to increase their multiplier to buy q3. Similarly,
+ one can see that in any equilibrium, advertiser 1 gets at least q1, since its highest price is within
+ their budget.
+ Now, assume some equilibrium exists with bidding multipliers $\tilde\mu_1$ and $\tilde\mu_2$ such that advertiser 1 gets
+ only q1. Then
+ $\frac{\tilde\mu_1 (v_1(1) + v_1(2) + v_1(3))}{B_2} > 1 \ge \frac{\tilde\mu_2 v_2(1)}{B_1},$
+ where the first inequality is because advertiser 2's multiplier is a best response, and the second
+ comes from the budget constraint of advertiser 1. Therefore,
+ $\frac{B_1}{B_2} \cdot \frac{v_1(1) + v_1(2) + v_1(3)}{v_2(1)} \ge \frac{\tilde\mu_2}{\tilde\mu_1}.$
+ But $\frac{v_1(2)}{v_2(2)} \ge \frac{B_1}{B_2} \cdot \frac{v_1(1) + v_1(2) + v_1(3)}{v_2(1)} = \frac{9}{4}$, and thus $\frac{v_1(2)}{v_2(2)} > \frac{\tilde\mu_2}{\tilde\mu_1}$. This contradicts the allocation
+ inequalities, since advertiser 2 wins q2. Therefore, we proved that with B1 = 1 and B2 = 4 the equilibrium
+ is unique, and in it advertiser 1 wins q1 and q2.
+ Now it remains to show a non-AIC example for tCPA-advertisers. Again consider two advertisers
+ and 3 queries, with the same values as in Table 2. Here, let the tCPA constraint of advertiser 1 be
+ T1 = 0.4 and that of advertiser 2 be T2 = 0.7. Then, again, we show that there exists a unique equilibrium
+ in which advertiser 1 gets queries 1 and 2.
+ First, to prove existence, let µ1 = 1.6 and µ2 = 1.2. Then we show this is an equilibrium,
+ since the three following conditions hold:
+ 1. Allocation: advertiser 1 wins q1 and q2 since it has a higher bid on them:
+ $\frac{v_1(1)}{v_2(1)} \ge \frac{v_1(2)}{v_2(2)} \ge \frac{\mu_2}{\mu_1} = \frac{1.2}{1.6} \ge \frac{v_1(3)}{v_2(3)}.$
+ 2. tCPA constraints are satisfied:
+ $T_2 v_2(3) \ge \mu_1 v_1(3)$, and $T_1(v_1(1) + v_1(2)) \ge \mu_2(v_2(1) + v_2(2))$.
+ 3. Best response: none of the advertisers can win more queries by increasing their multiplier:
+ $T_2(v_2(3) + v_2(2)) < \mu_1(v_1(3) + v_1(2))$, and $T_1(v_1(1) + v_1(2) + v_1(3)) < \mu_2(v_2(1) + v_2(2) + v_2(3))$.
+ Now, similar to the proof for the budget-constrained advertisers, we show the equilibrium is unique.
+ Note that there is no equilibrium in which advertiser 1 gets all queries, since the cost of all queries
+ for advertiser 1 is at least $v_2(1) + v_2(2) + v_2(3) = 12.3$, which is larger than $T_1(v_1(1) + v_1(2) + v_1(3))$.
+ Similarly, advertiser 2 cannot get all queries, since the tCPA constraint would not hold: $v_1(1) + v_1(2) + v_1(3) > T_2(v_2(1) + v_2(2) + v_2(3))$. So, to prove the uniqueness of the equilibrium, it remains to show that
+ there is no equilibrium in which advertiser 1 only gets query 1. Towards a contradiction, assume such an equilibrium
+ exists with corresponding multipliers $\tilde\mu_1$ and $\tilde\mu_2$. Then we must have
+ $\frac{\tilde\mu_1 (v_1(1) + v_1(2) + v_1(3))}{T_2 (v_2(1) + v_2(2) + v_2(3))} > 1 \ge \frac{\tilde\mu_2 v_2(1)}{T_1 v_1(1)},$
+ where the first inequality is because advertiser 2's multiplier is a best response, and the second
+ inequality comes from the tCPA constraint of advertiser 1. Therefore,
+ $\frac{T_1}{T_2} \cdot \frac{v_1(1) + v_1(2) + v_1(3)}{v_2(1) + v_2(2) + v_2(3)} \cdot \frac{v_1(1)}{v_2(1)} \ge \frac{\tilde\mu_2}{\tilde\mu_1}.$
+ But $\frac{v_1(2)}{v_2(2)} \ge \frac{T_1}{T_2} \cdot \frac{v_1(1) + v_1(2) + v_1(3)}{v_2(1) + v_2(2) + v_2(3)} \cdot \frac{v_1(1)}{v_2(1)}$, and thus $\frac{v_1(2)}{v_2(2)} > \frac{\tilde\mu_2}{\tilde\mu_1}$. This contradicts the allocation
+ inequalities, since advertiser 2 wins q2. Therefore, we proved that with T1 = 0.4 and T2 = 0.7 the
+ equilibrium is unique, and in it advertiser 1 wins q1 and q2.
+ Now, we show that if advertiser 1 increases their tCPA constraint to $T'_1 = 0.6$, then there exists
+ an equilibrium in which advertiser 1 only wins q1. Let $\mu'_1 = 1$ and $\mu'_2 = 2.38$. Then:
+ 1. Allocation: advertiser 1 wins q1:
+ $\frac{v_1(1)}{v_2(1)} \ge \frac{\mu'_2}{\mu'_1} = \frac{2.38}{1} \ge \frac{v_1(2)}{v_2(2)} \ge \frac{v_1(3)}{v_2(3)}.$
+ 2. tCPA constraints are satisfied:
+ $T_2(v_2(3) + v_2(2)) \ge \mu'_1(v_1(3) + v_1(2))$, and $T'_1 v_1(1) \ge \mu'_2 v_2(1)$.
+ 3. Best response: none of the advertisers can win more queries by increasing their multiplier:
+ $T_2(v_2(1) + v_2(2) + v_2(3)) < \mu'_1(v_1(1) + v_1(2) + v_1(3))$, and $T'_1(v_1(1) + v_1(2)) < \mu'_2(v_2(1) + v_2(2))$.
+ B First-price pacing equilibrium
+ Proof of Lemma 4.12. We follow the same steps as the proof in Conitzer et al. (2022a), extended to tCPA-
+ constrained advertisers. Consider two sets of feasible bidding multipliers µ and µ'. We will show
+ that $\mu^* = \max(\mu, \mu')$ is also feasible, where max is the component-wise maximum of the bidding
+ profiles for the n advertisers.
+ Each query q is allocated to the bidder with the highest paced bid. We need to check that
+ constraint (2) is satisfied. Fix advertiser a. Its multiplier in µ* must also be its multiplier in one of µ
+ or µ'; without loss of generality, assume $\mu^*_a = \mu_a$. Then the set of queries $X^*_a$ that a wins with bidding profile µ*
+ must be a subset of the queries $X_a$ it wins under µ, since all other advertisers' bids have either remained
+ the same or increased. On the other hand, the cost of the queries a wins stays the same, since it is a first-
+ price auction. Since constraint (2) is feasible for the bidding multipliers µ, we must have
+ $(\mu_a - T_a) \sum_{q \in X_a} v_a(q) \le B_a.$
+ But then, since $X^*_a \subseteq X_a$, we have as well
+ $(\mu_a - T_a) \sum_{q \in X^*_a} v_a(q) = (\mu^*_a - T_a) \sum_{q \in X^*_a} v_a(q) \le B_a,$
+ which implies µ* is a feasible strategy.
+ To complete the proof, we need to show that the strategy in which every advertiser takes the maximum feasible
+ pace $\mu^*_a = \sup\{\mu_a \mid \mu \text{ is feasible}\}$ results in an equilibrium. To see this, note that if an advertiser's
+ strategy is not a best response, they have an incentive to increase their pace with their constraints remaining
+ satisfied. But then this would result in another feasible pacing strategy, in contradiction
+ with the choice of the highest pace $\mu^*_a$. A similar argument also shows the equilibrium is unique:
+ assume there exists another pacing equilibrium in which some advertiser a has a pace
+ less than $\mu^*_a$. Then, by increasing their pace to $\mu^*_a$, they would get at least as many queries as before, so
+ $\mu^*_a$ is the best-response strategy.
+ C Proofs for Truthful Auctions
+ We start with the following observation, which follows by applying Assumption 5.1 to reformulate the
+ allocation function in the case of two advertisers as a function of a single variable.
+ Claim C.1. The probability of allocating each query is a function of the ratio of the bids, i.e., there
+ exists a non-decreasing function g : R+ → [0, 1] such that the following hold.[16]
+ 1. $x_1(b_1(q), b_2(q)) = g\!\left(\frac{b_1(q)}{b_2(q)}\right)$,
+ 2. $g(z) + g(1/z) = 1$,
+ 3. $g(0) = 0$.
+ [16] Notice that the function g is measurable since it is non-decreasing.
+ For example, SPA satisfies the above claim with g(z) = 1 when $z = \frac{b_1(q)}{b_2(q)} \ge 1$. We are ready to
+ prove Theorem 5.9, which follows similar steps to Lemma 4.4.
+ Proof of Theorem 5.9. By Claim 5.4, there exist µ1 and µ2 such that advertiser a bids $\mu_a v_a(q)$ on
+ each query. Therefore, we can write the budget constraint for bidder 1 as
+ $B_1 = \int_0^1 p_1(b_1(q), b_2(q))\,dq = \int_0^1 \mu_1 v_1(q)\, g\!\left(\frac{v_1(q)}{v_2(q)} \frac{\mu_1}{\mu_2}\right) dq - \int_0^1 \int_0^{\mu_1 v_1(q)} g\!\left(\frac{x}{v_2(q)\mu_2}\right) dx\,dq.$
+ Next, with the change of variable $x = v_1(q)\,y$, we have
+ $B_1 = \int_0^1 \mu_1 v_1(q)\, g\!\left(\frac{v_1(q)}{v_2(q)} \frac{\mu_1}{\mu_2}\right) dq - \int_0^1 \int_0^{\mu_1} g\!\left(\frac{v_1(q)}{v_2(q)\mu_2}\, y\right) v_1(q)\, dy\,dq.$
+ As before, let $h(q) = \frac{v_1(q)}{v_2(q)}$ and let $z = h(q)$, so that $dq = d\,h^{-1}(z) = \frac{1}{h'(h^{-1}(z))}\,dz$. So,
+ $B_1 = \int_0^\infty \mu_1 v_1(h^{-1}(z))\, g\!\left(\frac{z\mu_1}{\mu_2}\right) \frac{dz}{h'(h^{-1}(z))} - \int_0^\infty \int_0^{\mu_1} g\!\left(\frac{z y}{\mu_2}\right) v_1(h^{-1}(z))\, dy\, \frac{dz}{h'(h^{-1}(z))}.$
+ Define $f(z) = \frac{v_2(h^{-1}(z))}{h'(h^{-1}(z))} = \frac{1}{z}\, \frac{v_1(h^{-1}(z))}{h'(h^{-1}(z))}$. Then we have
+ $B_1 = \int_0^\infty \mu_1 z f(z)\, g\!\left(\frac{z\mu_1}{\mu_2}\right) dz - \int_0^\infty \left( \int_0^{\mu_1} g\!\left(\frac{z y}{\mu_2}\right) dy \right) z f(z)\,dz.$
+ Similarly,
+ $B_2 = \int_0^\infty \mu_2 v_2(h^{-1}(z)) \left(1 - g\!\left(\frac{z\mu_1}{\mu_2}\right)\right) \frac{dz}{h'(h^{-1}(z))} - \int_0^\infty \int_0^{\mu_2} g\!\left(\frac{y}{\mu_1 z}\right) v_2(h^{-1}(z))\, dy\, \frac{dz}{h'(h^{-1}(z))},$
+ that is,
+ $B_2 = \int_0^\infty \mu_2 f(z) \left(1 - g\!\left(\frac{z\mu_1}{\mu_2}\right)\right) dz - \int_0^\infty \left( \int_0^{\mu_2} g\!\left(\frac{y}{\mu_1 z}\right) dy \right) f(z)\,dz.$
+ Next, we find the implicit function to derive $r = \frac{\mu_2}{\mu_1}$. By a change of variable, we have the following
+ two equations:
+ $\frac{B_1}{\mu_1} = \int_0^\infty z f(z)\, g(z/r)\,dz - r \int_0^\infty \left( \int_0^{z/r} g(w)\,dw \right) f(z)\,dz,$
+ $\frac{B_2}{\mu_2} = \int_0^\infty f(z) \left(1 - g(z/r)\right) dz - \frac{1}{r} \int_0^\infty \left( \int_0^{r/z} g(w)\,dw \right) z f(z)\,dz.$
+ The implicit function for r is the following:
+ $\frac{B_1}{B_2} = \frac{\int_0^\infty f(z) \left( z\, g(z/r) - r \int_0^{z/r} g(w)\,dw \right) dz}{\int_0^\infty f(z) \left( r \left(1 - g(z/r)\right) - z \int_0^{r/z} g(w)\,dw \right) dz}.$
+ Recalling the payment rule in Lemma 5.3, this can be rewritten as
+ $\frac{B_1}{B_2} = \frac{\int_0^\infty r f(z)\, p_1(z/r, 1)\,dz}{\int_0^\infty z f(z)\, p_1(r/z, 1)\,dz},$
+ which finishes the proof for the budget-constrained advertisers.
+ Now, consider two tCPA-constrained advertisers. Following the same argument as above, we get
+ the following from the tightness of the tCPA constraints:
+ $T_1 \int_0^\infty z f(z)\, g\!\left(\frac{z\mu_1}{\mu_2}\right) dz = \int_0^\infty \mu_1 z f(z)\, g\!\left(\frac{z\mu_1}{\mu_2}\right) dz - \int_0^\infty \left( \int_0^{\mu_1} g\!\left(\frac{z y}{\mu_2}\right) dy \right) z f(z)\,dz,$
+ and
+ $T_2 \int_0^\infty f(z) \left(1 - g\!\left(\frac{z\mu_1}{\mu_2}\right)\right) dz = \int_0^\infty \mu_2 f(z) \left(1 - g\!\left(\frac{z\mu_1}{\mu_2}\right)\right) dz - \int_0^\infty \left( \int_0^{\mu_2} g\!\left(\frac{y}{\mu_1 z}\right) dy \right) f(z)\,dz.$
+ By dividing both sides of the two equations, we get the desired results.
+ Now, to prove the main theorem, we need to show that the value accrued by each advertiser is
+ monotone in µ1/µ2.
+ Claim C.2. Let µi be the optimal bidding multiplier for advertiser i. Given the assumptions in
+ Theorem 5.8, the value obtained by advertiser 1 is increasing in $r = \frac{\mu_1}{\mu_2}$.
+ Proof. Following the proof of Theorem 5.9, we can write the value obtained by advertiser 1 as
+ $V_1(B_1, B_2) = \int_0^\infty f(z)\, z\, g(rz)\,dz,$
+ where r is the solution to the implicit equation stated in Theorem 5.9. Monotonicity of $V_1(B_1, B_2)$ as
+ a function of r follows from the fact that g is a monotone function.
0NFQT4oBgHgl3EQfzzas/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
0dFKT4oBgHgl3EQfNy2J/content/2301.11756v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a6b385dd8600c471d64a88dfc75cfa6fb3507a74f198495325c4a1dc74794682
3
+ size 178619
0dFKT4oBgHgl3EQfNy2J/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:12450815d4db0f8b59947692cfbd446ca5ed19e02f077c91d2e9ae750ea671b7
3
+ size 1900589
0tE2T4oBgHgl3EQfiQfL/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:55cdda13a676c16e8aed73eb0d772c3e0ff9118c86a1afbaa7ea03f5f5a57cec
3
+ size 2293805
2dFST4oBgHgl3EQfXzgB/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:df1a08c23793be0f5642f8203f824860bc85591093aea8c84523e3d1f371a6bc
3
+ size 2752557
2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7e313915d9845172b5a9df8726fa77f08ab12f19965c9a3bee73e29d0d043fe9
3
+ size 4833620
2tE2T4oBgHgl3EQfjAfh/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4849547eefa2213d18d665aee247543808f341dd1000a7e2b76df8f208d123dc
3
+ size 155587
2tFLT4oBgHgl3EQfrC9d/content/2301.12142v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:51c86dc4320c42ee2bd3bb76fcc56bf9c079cde47a60ba07a76e6572ed4624bc
3
+ size 197833
2tFLT4oBgHgl3EQfrC9d/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c2bd3b23d6a387ce3025cddaf224d6984cf7fb4b129aa75c87b68b1a8a0476f2
3
+ size 3211309
2tFLT4oBgHgl3EQfrC9d/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:822c4adbb4f0008699df6d659dff85eb8682ee98b095ee42b8dd9fb2977ab74f
3
+ size 121244
39FAT4oBgHgl3EQflh2J/content/tmp_files/2301.08618v1.pdf.txt ADDED
@@ -0,0 +1,1102 @@
+ arXiv:2301.08618v1 [cs.LG] 20 Jan 2023
+ Coupled Physics-informed Neural Networks for
+ Inferring Solutions of Partial Differential Equations
+ with Unknown Source Terms
+ Aina Wang, Pan Qin, Xi-Ming Sun, Senior Member, IEEE
+ Abstract—Physics-informed neural networks (PINNs) provide
+ a transformative development for approximating the solutions
+ to partial differential equations (PDEs). This work proposes
+ a coupled physics-informed neural network (C-PINN) for
+ nonhomogeneous PDEs with unknown dynamical source terms,
+ which are used to describe systems with external forces and
+ cannot be well approximated by existing PINNs. In our
+ method, two neural networks, NetU and NetG, are proposed.
+ NetU is constructed to generate a quasi-solution satisfying the PDEs
+ under study. NetG is used to regularize the training of NetU.
+ Then, the two networks are integrated into a data-physics-hybrid
+ cost function. Finally, we propose a hierarchical training strategy
+ to optimize and couple the two networks. The performance of
+ C-PINN is demonstrated by approximating several classical PDEs.
+ Index Terms—Coupled physics-informed neural network, hierarchical
+ training strategy, partial differential equations, unknown
+ source term
+ I. Introduction
+ PARTIAL differential equations (PDEs) are one of the
+ general representations for describing spatio-temporal
+ dependence in physics [1], medicine [2], engineering [3],
+ finance [4], and weather [5], [6]. Numerical approaches, like
+ the finite difference method (FDM) [7] and the finite element
+ method (FEM) [8], [9], have been widely investigated and applied.
+ FDM uses a topologically square network of lines to construct
+ the discretization of PDEs. Thus, complex geometries in multiple
+ dimensions challenge FDM [10]. On the other hand, complicated
+ geometries can be treated with FEM [11]. The greatest
+ difficulty of classical numerical approaches is balancing the
+ accuracy and efficiency of forming meshes.
+ Among the numerical methods for solving PDEs, the
+ Galerkin method is a famous computational method in which a
+ linear combination of basis functions is employed to approximate
+ the solutions to PDEs [12]. Motivated by this, several
+ works have used machine learning models to replace the linear
+ combination of basis functions to construct data-efficient and
+ physics-informed learning methods for solving PDEs [13]–[15].
+ Successful applications of deep learning methods to
+ various fields, like image [16], text [17], and speech recognition
+ [18], ensure that they are excellent replacements for the linear
+ combination of basis functions for solving PDEs [4]. Consequently,
+ leveraging the well-known approximation capability
+ of neural networks to solve PDEs is a natural idea and has
+ been investigated in various forms previously [19]–[21]. The
+ framework of physics-informed neural networks (PINNs) [22]
+ was introduced to solve forward problems while respecting
+ any given physical laws governed by PDEs, including the
+ nonlinear operator and the initial and boundary conditions. Within
+ the PINNs framework, both the sparse measurements and
+ the physical knowledge are fully integrated into the cost
+ function [23], [24]. The solution with respect to spatio-temporal
+ dependence is obtained by training on the cost function. Note
+ that the approximation obtained by machine learning and deep
+ learning is meshfree, and so has no problem balancing the
+ accuracy and efficiency of forming meshes.
+ The authors are with the Key Laboratory of Intelligent Control and Optimization
+ for Industrial Equipment of Ministry of Education and the School
+ of Control Science and Engineering, Dalian University of Technology, Dalian
+ 116024, China. E-mail: WangAn@mail.dlut.edu.cn, qp112cn@dlut.edu.cn,
+ sunxm@dlut.edu.cn (Corresponding author: Pan Qin).
+ Meanwhile, the potential of using PINNs to solve inverse
+ problems is promising [25]. A hybrid PINN was proposed
+ to solve PDEs in [26], in which a local fitting method was
+ combined with neural networks to solve PDEs. The hybrid
+ PINN was used to identify unknown constant parameters
+ in PDEs. The generative adversarial network (GAN) [27]
+ was also physics-informed to solve inverse problems.
+ The stochastic physics-informed GAN was investigated for
+ estimating the distributions of unknown parameters in PDEs.
+ The recent work [28] encoded the physical laws governed
+ by PDEs into the architecture of GANs to solve inverse
+ problems for stochastic PDEs. PINNs were also combined with
+ the Bayesian method to solve inverse problems from noisy data
+ [29].
+ PDEs can be classified into homogeneous and nonhomogeneous
+ types. Systems without external forces can be described
+ by homogeneous PDEs. Nonhomogeneous PDEs can
+ be applied to reveal the continuous energy-propagation behavior
+ of a source and are hereby effective for describing
+ practical systems driven by external forces. The function forms
+ of the solution and the source term were both assumed to be
+ unknown in [30], in which the measurements of the source
+ term had to be obtained separately from the measurements of
+ the solution. However, independent measurements of the
+ external forces cannot always be easily obtained in practical
+ situations. The recent work [31] can directly solve steady-state
+ PDEs' forward and inverse problems, where the source
+ terms were assumed to be constant. Thus, [31] is not feasible
+ for systems with unsteady external forces, which should be
+ described by dynamical functions.
+ Although the aforementioned methods have made great
+ progress on unknown parameters, prior information or measurements
+ on external forces cannot always be easily obtained
+ in practical situations. For example, the real distribution of
+ the seismic wave field underground is unknown [32]; the vast
+ number of signals internal to an engine, indicating the operation state of
+ the engine, cannot be isolated [33]. Furthermore, the existing
+ methods with the assumption of a constant source term
+ cannot be readily extended to describe the spatio-temporal
+ dependence of complex dynamical systems. The determination
+ of dynamical source terms with little prior information, or even
+ without any prior information, is an under-investigated issue.
+ To this end, this paper proposes a coupled PINN (C-PINN),
+ using sparse measurements and limited prior information
+ of PDEs, to solve PDEs with unknown source terms. In our
+ method, two neural networks, NetU and NetG, are proposed.
+ NetU is applied to generate a quasi-solution satisfying the PDEs
+ under study; NetG is used to regularize the training of NetU.
+ Then, the two networks are integrated into a data-physics-hybrid
+ cost function. Furthermore, we propose a hierarchical
+ training strategy to optimize and couple the two networks.
+ Finally, the proposed C-PINN is applied to solve several
+ classical PDEs to demonstrate its performance.
+ The rest of the paper is organized as follows. The classical
+ PINNs are briefly reviewed in Section II. A C-PINN using
+ sparse measurements and limited prior knowledge to solve
+ PDEs with unknown source terms is proposed in Section III.
+ Meanwhile, the two neural networks, NetU and NetG, are
+ proposed in our method. Furthermore, a hierarchical training
+ strategy is proposed to optimize and couple the two networks.
+ In Section IV, our proposed C-PINN is validated with four
+ case studies. In Section V, the concluding remarks and
+ future work are presented.
+ II. Brief Review of PINNs
+ In this section, we briefly review the basic idea of PINNs
+ for data-driven solutions to PDEs and data-driven discovery
+ of PDEs [22].
+ The data-driven solution problem is to solve PDEs of
+ the generalized form
+ $u_t(x, t) + \mathcal{N}[u(x, t)] = 0, \quad x \in \Omega \subseteq \mathbb{R}^d,\ t \in [0, T] \subset \mathbb{R}$   (1)
+ with known parameters. Here, x is the spatial variable, t is
+ the temporal variable with t = 0 being the initial state,
+ $u : \mathbb{R}^d \times \mathbb{R} \to \mathbb{R}$ denotes the hidden solution, $\mathcal{N}[\cdot]$ is a
+ series of partial differential operators, and the domain $\Omega \subseteq \mathbb{R}^d$ is
+ a spatially bounded open set with boundary ∂Ω. Analytical
+ and numerical methods have been widely investigated to find
+ a proper solution ψ(x, t) satisfying (1) [34]. The left-hand side
+ of (1) can be used to define a residual function as follows:
+ $f(x, t) := u_t(x, t) + \mathcal{N}[u(x, t)],$   (2)
+ where a neural network is used to approximate the solution
+ ψ(x, t) to the PDEs. The inverse problem is focused on the data-driven
+ discovery of PDEs of the generalized form (1), where
+ the unknown parameters of the PDEs turn into parameters of
+ PINNs.
+ PINNs for both problems can be trained by minimizing the
+ cost function
+ $MSE = MSE_D + MSE_{PH}.$   (3)
+ Here, $MSE_D$ is formulated as
+ $MSE_D = \sum_{(x,t,u) \in D} \left( \hat{u}(x, t; \hat\Theta_U) - u(x, t) \right)^2,$   (4)
+ where $\hat{u}(x, t; \hat\Theta_U)$ is the function of the neural network, with $\hat\Theta_U$
+ being its trained parameter set. Let D denote the training
+ dataset. This mean squared error term can be considered
+ the data-driven loss. $MSE_{PH}$ is given by
+ $MSE_{PH} = \sum_{(x,t) \in E} \hat{f}(x, t)^2,$   (5)
+ which regularizes $\hat{u}(x, t; \hat\Theta_U)$ to satisfy (1). Let E denote
+ the set of collocation points. This regularization term can be
+ considered the physics-informed loss for the homogeneous
+ PDEs. Here, $\hat{f}(x, t)$ is defined as
+ $\hat{f}(x, t) := \hat{u}_t(x, t; \hat\Theta_U) + \mathcal{N}[\hat{u}(x, t; \hat\Theta_U)],$   (6)
+ where $\hat{u}_t(x, t; \hat\Theta_U)$ and $\mathcal{N}[\hat{u}(x, t; \hat\Theta_U)]$ can be obtained using
+ automatic differentiation [35].
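+ For concreteness, a minimal PyTorch-style sketch (ours, not the authors' implementation) of the residual (6) and the loss (5) is given below, for the illustrative choice N[u] = -u_xx; the network architecture is arbitrary:
+ import torch
+ 
+ net = torch.nn.Sequential(torch.nn.Linear(2, 32), torch.nn.Tanh(), torch.nn.Linear(32, 1))
+ 
+ def f_hat(x, t):
+     # residual (6) for the illustrative operator N[u] = -u_xx (1-D heat equation)
+     x = x.clone().requires_grad_(True)
+     t = t.clone().requires_grad_(True)
+     u = net(torch.cat([x, t], dim=1))
+     u_t = torch.autograd.grad(u.sum(), t, create_graph=True)[0]
+     u_x = torch.autograd.grad(u.sum(), x, create_graph=True)[0]
+     u_xx = torch.autograd.grad(u_x.sum(), x, create_graph=True)[0]
+     return u_t - u_xx
+ 
+ x, t = torch.rand(16, 1), torch.rand(16, 1)    # collocation points in E
+ mse_ph = (f_hat(x, t) ** 2).mean()             # the physics-informed loss (5)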
+ III. Constructing C-PINN
+ C-PINN for solving PDEs with unknown source terms is
+ presented in this section. The nonhomogeneous PDEs are of
+ the following generalized form:
+ $u_t(x, t) + \mathcal{N}[u(x, t)] = g(x, t), \quad x \in \Omega \subseteq \mathbb{R}^d,\ t \in [0, T] \subset \mathbb{R},$   (7)
+ where x and t are the spatial and temporal variables, respectively,
+ $u : \mathbb{R}^d \times \mathbb{R} \to \mathbb{R}$ is as in (1), and $g : \mathbb{R}^d \times \mathbb{R} \to \mathbb{R}$ denotes
+ a general type of source term: linear, nonlinear,
+ steady-state, or dynamical. Ω is a spatially bounded open set with
+ boundary ∂Ω. Without loss of generality, the spatial domain of
+ (7) is subject to Dirichlet boundary, Neumann boundary, or
+ hybrid Dirichlet and Neumann boundary conditions. In
+ general, g(x, t) is used as a source term describing the external
+ forces of a dynamical system and cannot always be separately
+ measured, as mentioned in Section I.
+ Different from (6), the residual function for the
+ nonhomogeneous case is defined as
+ $f_N(x, t) := f(x, t) - g(x, t) = u_t(x, t) + \mathcal{N}[u(x, t)] - g(x, t).$   (8)
+ When g(x, t) is exactly known, $\hat{f}_N(x, t)$, obtained with automatic
+ differentiation from (8), can be directly used to regularize
+ the approximation of u(x, t). However, an unknown g(x, t)
+ leads to an unknown $f_N(x, t)$, which makes the aforementioned
+ regularization infeasible.
+ Therefore, the goal of C-PINN is to approximate the solution
+ to PDEs with unknown source terms described by (7). To
+ this end, two neural networks are included in C-PINN:
+ (a) NetU for approximating the solution satisfying (7); (b)
+ NetG for regularizing the training of NetU.
245
+ 1) Cost function:
246
+ To train C-PINN, the training dataset
247
+ is uniformly sampled from the system governed by (7). The
248
+ training dataset D divided into D = DB∪DI with DB∩DI = ∅,
249
+ where DB denotes the boundary and initial training dataset and
250
+ DI is the training dataset of interior of Ω. Collocation points
251
+
252
+ 3
253
+ (x, t) ∈ E correspond to those of (x, t, u) ∈ DI. Then, we adopt
254
+ the following data-physics-hybrid cost function
255
+ MSE = MSED + MSEPN
256
+ (9)
257
+ to train our proposed C-PINN. MSED and MSEPN in (9)
258
+ are the data-driven loss and physics-informed loss for the
259
+ nonhomogeneous PDEs, respectively. MSED adopts the same
260
+ form of (4). MSEPN is as the following
261
+ MSEPN =
262
+
263
+ (x,t)∈E
264
+ � ˆf (x, t) − ˆg
265
+
266
+ x, t; ˆΘG
267
+ ��2 ,
268
+ where ˆg
269
+
270
+ x, t; ˆΘG
271
+
272
+ is the function of NetG with ˆΘG being
273
+ its trained parameter set,
274
+ ˆf(x, t) has been defined by (2).
275
+ MSEPN corresponds to the physics-informed loss for the
276
+ nonhomogeneous PDEs obtained from (8) imposed at a finite
277
+ set of collocation points (x, t) ∈ E, which is used to regularize
278
+ ˆu
279
+
280
+ x, t; ˆΘU
281
+
282
+ of NetU to satisfy (7).
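+ As a minimal sketch of the data-physics-hybrid cost (9) (assuming the pde_residual helper above, and using a mean instead of a sum purely for numerical convenience):
+ ```python
+ def c_pinn_loss(net_u, net_g, data, colloc):
+     # data = (x_d, t_d, u_d): sparse measurements; colloc = (x_e, t_e): collocation points.
+     x_d, t_d, u_d = data
+     x_e, t_e = colloc
+     u_pred = net_u(tf.concat([x_d, t_d], axis=1))
+     mse_d = tf.reduce_mean(tf.square(u_pred - u_d))    # data-driven loss, cf. (4)
+     f_hat = pde_residual(net_u, x_e, t_e)              # u_t + N[u] via autodiff, cf. (2)
+     g_hat = net_g(tf.concat([x_e, t_e], axis=1))       # NetG's source-term estimate
+     mse_pn = tf.reduce_mean(tf.square(f_hat - g_hat))  # physics-informed loss, cf. (8)
+     return mse_d + mse_pn
+ ```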
+ 2) Hierarchical training strategy: Considering the relation between NetU and NetG in (9), a hierarchical training strategy is proposed. In many cases, neither the exact formulation nor even sparse measurements of g(x, t) are available, while the sparse measurements D_I can be obtained to enforce the structure of (7) and thereby achieve Θ̂_G. Thus, Θ_U and Θ_G should be estimated iteratively, with mutual dependence. Let k be the current iteration step; the core of the hierarchical training strategy is described by the following two optimization problems
+ Θ̂_G^(k+1) = arg min_{Θ_G} { MSE_D(Θ̂_U^(k)) + MSE_PN(Θ_G; Θ̂_U^(k)) } = arg min_{Θ_G} MSE_PN(Θ_G; Θ̂_U^(k)) (10)
+ and
+ Θ̂_U^(k+1) = arg min_{Θ_U} { MSE_D(Θ_U) + MSE_PN(Θ_U; Θ̂_G^(k+1)) }, (11)
+ where Θ̂_U^(k) is the estimated parameter set of NetU at the kth step, Θ̂_G^(k+1) is the estimated parameter set of NetG at the (k+1)th step, and Θ̂_U^(k+1) is the estimated parameter set of NetU at the (k+1)th step, which describes the function û(x, t; Θ̂_U^(k+1)). The details of the hierarchical training strategy are given in Algorithm 1.
+ Note that Θ_U^(0) and Θ_G^(0) are used at Step 0 as a given parameter set for NetU and as the initialization of the parameter set for NetG, respectively. Furthermore, the parameter sets of NetG and NetU are transmitted iteratively between the two optimization problems.
+ IV. Numerical experiments
+ In this section, our proposed C-PINN is applied to solve several classical PDEs to demonstrate its performance. All the examples are implemented with TensorFlow. A fully connected structure with the hyperbolic tangent activation function is applied, initialized by the Xavier scheme. The training dataset (x, t, u) ∈ D and collocation points (x, t) ∈ E are then input into NetU and NetG. L-BFGS [36] is used to hierarchically solve the optimization problems (10) and (11) and thereby couple the two networks.
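+ As an illustration (again a sketch rather than the authors' code), such a fully connected tanh network with Xavier initialization could be built as follows; the layer sizes match the Case 1 setting reported below.
+ ```python
+ def make_net(widths, d_out=1):
+     # Fully connected tanh network, Xavier/Glorot-initialized, as stated in the text.
+     layers = [tf.keras.layers.Dense(w, activation="tanh",
+                                     kernel_initializer="glorot_normal")
+               for w in widths]
+     layers.append(tf.keras.layers.Dense(d_out, kernel_initializer="glorot_normal"))
+     return tf.keras.Sequential(layers)
+ 
+ net_u = make_net([20] * 8)   # NetU: eight hidden layers of 20 units (Case 1 setting)
+ net_g = make_net([20] * 8)   # NetG: eight hidden layers of 20 units (Case 1 setting)
+ ```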
+ Algorithm 1 The hierarchical strategy of optimizing and coupling for C-PINN.
+ - Initialize: Randomly sample the training dataset (x, t, u) ∈ D and collocation points (x, t) ∈ E. Randomly generate initial parameter sets Θ_U^(0) and Θ_G^(0).
+ - Step 0: Assume the kth iteration has achieved Θ̂_U^(k) and Θ̂_G^(k).
+ Repeat:
+ - Step k-1: Train NetG by solving the optimization problem (10) to obtain Θ̂_G^(k+1), where the estimate of û_t(x, t; Θ̂_U^(k)) + N[û(x, t; Θ̂_U^(k))] in MSE_PN is obtained from the former iteration result Θ̂_U^(k).
+ - Step k-2: Train NetU by solving the optimization problem (11) to obtain Θ̂_U^(k+1), using the estimate ĝ(x, t; Θ̂_G^(k+1)) in MSE_PN.
+ - Until the stop criterion is satisfied.
+ - Return the solution function Θ̂_U → û(x, t; Θ̂_U), which can predict the solution of (7) at any point (x, t) in Ω.
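+ A minimal sketch of this alternation is given below; Adam is used in place of the paper's L-BFGS purely for brevity, and net_u, net_g, data, and colloc are as in the earlier sketches.
+ ```python
+ opt_g = tf.keras.optimizers.Adam(1e-3)
+ opt_u = tf.keras.optimizers.Adam(1e-3)
+ 
+ for k in range(1000):  # repeat until a stop criterion is met
+     # Step k-1: update NetG with NetU frozen, cf. problem (10).
+     with tf.GradientTape() as tape:
+         loss_g = c_pinn_loss(net_u, net_g, data, colloc)
+     grads = tape.gradient(loss_g, net_g.trainable_variables)
+     opt_g.apply_gradients(zip(grads, net_g.trainable_variables))
+ 
+     # Step k-2: update NetU with NetG frozen, cf. problem (11).
+     with tf.GradientTape() as tape:
+         loss_u = c_pinn_loss(net_u, net_g, data, colloc)
+     grads = tape.gradient(loss_u, net_u.trainable_variables)
+     opt_u.apply_gradients(zip(grads, net_u.trainable_variables))
+ ```
+ Differentiating the loss only with respect to one network's trainable variables implements the freezing of the other network required by (10) and (11).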
+ We evaluate the performance of our proposed C-PINN by means of the root mean squared error (RMSE)
+ RMSE = sqrt( (1/|T|) Σ_{(x,t)∈T} ( u(x, t) − û(x, t) )² ),
+ where T is the set of testing collocation points, |T| is its cardinality, and u(x, t) and û(x, t) denote the ground truth and the corresponding predictions, respectively. To further validate the performance of C-PINN, Pearson's correlation coefficient (CC)
+ CC = cov( u(x, t), û(x, t) ) / ( sqrt(Var u(x, t)) · sqrt(Var û(x, t)) )
+ is also used to measure the similarity between ground truth and prediction, where cov(u(x, t), û(x, t)) is the covariance between u(x, t) and û(x, t), and Var u(x, t) and Var û(x, t) are the variances of u(x, t) and û(x, t), respectively.
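+ Both criteria are one-liners over the test set; a NumPy sketch (illustrative, assuming flat arrays of ground truths and predictions):
+ ```python
+ import numpy as np
+ 
+ def rmse(u_true, u_pred):
+     return float(np.sqrt(np.mean((u_true - u_pred) ** 2)))
+ 
+ def pearson_cc(u_true, u_pred):
+     # Equivalent to cov(u, u_hat) / (std(u) * std(u_hat)).
+     return float(np.corrcoef(u_true, u_pred)[0, 1])
+ ```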
+ A. Case 1: 1-D Heat Equation
+ C-PINN is first applied to solve the heat equation with unknown external forces, where both Dirichlet and Neumann boundary conditions are considered to demonstrate its performance.
+ 1) Dirichlet Boundary Condition
+ Here, we consider the heat equation with Dirichlet boundary conditions:
+ ∂u/∂t = a² ∂²u/∂x² + g(x, t), 0 < x < L, t > 0;
+ u|_{t=0} = φ(x), 0 ⩽ x ⩽ L;
+ u|_{x=0} = 0, u|_{x=L} = 0, t > 0, (12)
+ where the thermal diffusivity is a = 1, u(x, t) is the primary variable and denotes the temperature at (x, t), L = π is the length of the bounded rod, φ(x) = 0 is the initial temperature, and g(x, t) = xe^{−t} denotes the unknown external heat source at (x, t). The analytical solution u(x, t) to (12) is obtained following [37]. In this experiment, the settings of C-PINN are as follows. There are eight hidden layers with 20 units each for both NetU and NetG. A total of 110 training data (x, t, u(x, t)) in D with t ∈ [0, 6], comprising 10 training data in D_I and 100 training data in D_B, are randomly sampled, and 10 sparse collocation points are randomly sampled to enforce the structure of (12). Fig. 1 shows the sparse training dataset and the prediction results. Specifically, the magnitude of the predictions û(x, t) using the training dataset is shown in Fig. 1(a) as a heat map. In this case, the RMSE is 4.225390e−02 and the correlation coefficient is 9.785444e−01. Moreover, we compare the ground truths and the predictions at the fixed times t = 1.5, 3, and 4.5 in Fig. 1(b) to (d), respectively. The evaluation criteria in Table I further quantify the performance of our proposed C-PINN.
+ [Fig. 1. (a) Predictions û(x, t) for the 1-D heat equation with Dirichlet boundary condition, with the 110 training data and 10 collocation points marked; (b), (c), and (d) comparisons of the ground truths and predictions corresponding to the fixed-time t = 1.5, 3, and 4.5 snapshots depicted by the dashed vertical lines in (a), respectively.]
+ TABLE I
+ Evaluation criteria for the three temporal snapshots depicted by the dashed vertical lines in Fig. 1(a).
+ Criteria | t = 1.5      | t = 3        | t = 4.5
+ RMSE     | 4.600305e-02 | 1.342719e-02 | 2.991229e-02
+ CC       | 9.753408e-01 | 9.912983e-01 | 9.805664e-01
+ Subsequently, an experiment on a PDE with Neumann boundary conditions is explored to show the general performance of C-PINN.
+ 2) Neumann Boundary Condition
+ The heat equation with Neumann boundary conditions is defined as
+ ∂u/∂t = a² ∂²u/∂x² + g(x, t), 0 < x < L, t > 0;
+ u|_{t=0} = φ(x), 0 ⩽ x ⩽ L;
+ u|_{x=0} = 0, ∂u/∂x|_{x=L} = 0, t > 0, (13)
+ with thermal diffusivity a = 1, rod length L = π, initial temperature φ(x) = sin(x/2), and external heat source g(x, t) = sin(x/2). The analytical solution u(x, t) to (13) is obtained according to [37]. In this example, NetU has three hidden layers of 30 neurons each, and NetG has eight hidden layers of 20 units each. The data (x, t, u(x, t)) in D are considered with t ∈ [0, 10]. A total of 130 training data in D_B, comprising 10 initial training data, 60 left-boundary training data, and 60 right-boundary training data, are randomly sampled. Moreover, 20 sparse collocation points are randomly sampled to enforce the structure of (13). The magnitude of the predictions û(x, t) using the training dataset is shown in Fig. 2(a). The RMSE is 5.748950e−02 and the correlation coefficient is 9.988286e−01. Moreover, we compare the ground truths and the predictions at the fixed times t = 3, 6, and 9 in Fig. 2(b) to (d), respectively. The evaluation criteria in Table II further evaluate the performance of our proposed C-PINN.
+ [Fig. 2. (a) Predictions û(x, t) for the 1-D heat equation with Neumann boundary condition, with the 130 training data and 20 collocation points marked; (b), (c), and (d) comparisons of the ground truths and predictions corresponding to the fixed-time t = 3, 6, and 9 snapshots depicted by the dashed vertical lines in (a), respectively.]
+ TABLE II
+ Evaluation criteria for the three temporal snapshots depicted by the dashed vertical lines in Fig. 2(a).
+ Criteria | t = 3        | t = 6        | t = 9
+ RMSE     | 5.343142e-02 | 5.884118e-02 | 7.064205e-02
+ CC       | 9.982448e-01 | 9.990231e-01 | 9.984719e-01
+ B. Case 2: 1-D Wave Equation
+ The wave equation is
+ ∂²u/∂t² = a² ∂²u/∂x² + g(x, t), 0 < x < L, t > 0;
+ u|_{x=0} = 0, u|_{x=L} = 0, t > 0;
+ u|_{t=0} = 0, ∂u/∂t|_{t=0} = 0, 0 ⩽ x ⩽ L, (14)
+ where the wave speed is a = 1, the length of the bounded string is L = π, the wave propagation time is t = 6, and the external force at (x, t) is g(x, t) = sin(2πx/L) sin(2aπt/L); the displacement u(x, t) at (x, t), obtained according to [37], is then investigated.
+ In this experiment, NetU has three hidden layers of 30 neurons each, and NetG has eight hidden layers of 20 units each. A total of 210 training data (x, t, u(x, t)) in D, comprising 50 initial training data, 120 boundary training data, and 40 collocation points, are randomly sampled. Fig. 3(a) shows the sparse training dataset and the magnitude of the displacement û(x, t) at (x, t). Fig. 3(b) to (d) show comparisons of the ground truths and predictions corresponding to the three fixed times t = 1.5, 3, and 4.5, depicted by the dashed vertical lines in Fig. 3(a), respectively. The RMSE is 7.068626e−02 and the correlation coefficient is 9.864411e−01. The evaluation criteria for the three temporal snapshots are listed in Table III.
+ [Fig. 3. (a) Predictions û(x, t) for the 1-D wave equation, with the training data and collocation points marked; (b), (c), and (d) comparisons of the ground truths and predictions corresponding to the fixed-time t = 1.5, 3, and 4.5 snapshots depicted by the dashed vertical lines in (a), respectively.]
+ TABLE III
+ Evaluation criteria for the three temporal snapshots depicted by the dashed vertical lines in Fig. 3(a).
+ Criteria | t = 1.5       | t = 3        | t = 4.5
+ RMSE     | 1.424030e-01  | 3.305190e-02 | 5.201132e-02
+ CC       | 9.6238994e-01 | 9.985312e-01 | 9.983170e-01
+ C. Case 3: 2-D Poisson Equation
+ We further consider the following 2-D Poisson equation
+ ∂²u/∂x² + ∂²u/∂y² = T_0, 0 < x < a, 0 < y < b;
+ u(x, 0) = 0, u(x, b) = T, 0 ⩽ x ⩽ a;
+ u(0, y) = 0, u(a, y) = 0, 0 ⩽ y ⩽ b, (15)
+ where T = 1, the constant source term T_0 = 1 is unknown, and a = b = 1. The analytical solution u(x, y) to (15) is obtained according to [37]. In this experiment, the settings of C-PINN are as follows. There are eight hidden layers with 20 units each for both NetU and NetG. Thirty training data in D_B and 3 collocation points in D_I are used. Fig. 4(a) shows the sparse training dataset and the predictions û(x, y). Fig. 4(b) to (d) show the prediction performance at the fixed locations y = 0.2, 0.4, and 0.6 depicted in Fig. 4(a), respectively. The RMSE is 1.594000e−02 and the correlation coefficient is 9.997390e−01. The corresponding evaluation criteria are listed in Table IV.
+ [Fig. 4. (a) Predictions û(x, y) for the 2-D Poisson equation, with the 30 training data and 3 collocation points marked; (b), (c), and (d) comparisons of the ground truths and predictions corresponding to the fixed-location y = 0.2, 0.4, and 0.6 snapshots depicted by the dashed vertical lines in (a), respectively.]
+ TABLE IV
+ Evaluation criteria for the three fixed-location snapshots depicted by the dashed vertical lines in Fig. 4(a).
+ Criteria | y = 0.2      | y = 0.4      | y = 0.6
+ RMSE     | 1.763408e-02 | 1.139888e-02 | 7.696680e-03
+ CC       | 9.986055e-01 | 9.999703e-01 | 9.999656e-01
+ D. Case 4: 3-D Helmholtz Equation
+ C-PINN is also applied to solve a 3-D Helmholtz equation with an unknown source term. In particular, we consider the same test PDE that was previously suggested in [26]:
+ Δu(x) + p²u(x) = g(x) in Ω ⊂ R³;
+ u(x) = u_0(x) on ∂Ω, (16)
+ where Δ = ∂²/∂x² + ∂²/∂y² + ∂²/∂z² is the Laplacian operator, x = (x, y, z)⊤ are the coordinates with x, y, z ∈ (0, 1/4], p = 5 is the wavenumber, and g(x) is chosen as the right-hand side of (16) so that
+ u(x) = ( 0.1 sin(2πx) + tanh(10x) ) sin(2πy) sin(2πz)
+ is the analytical solution of (16) [26]. In this experiment, NetU has three hidden layers of 100, 50, and 50 neurons, respectively, and NetG has eight hidden layers of 20 units each. Sixty training data and 120 collocation points are sampled. Fig. 5(a) shows the solution on the (x, y, z = 0.12) snapshot. Furthermore, Fig. 5(b) to (d) show comparisons of the ground truths and predictions extracted at (x = 0.05, z = 0.12), (x = 0.15, z = 0.12), and (x = 0.2, z = 0.12), respectively. The evaluation criteria for these extractions are listed in Table V. In this experiment, the RMSE is 1.192859e−01 and the correlation coefficient is 9.057524e−01.
+ [Fig. 5. (a) Predictions û(x, y, z = 0.12) for the 3-D Helmholtz equation; (b), (c), and (d) comparisons of the ground truths and predictions corresponding to the (x = 0.05, z = 0.12), (x = 0.15, z = 0.12), and (x = 0.20, z = 0.12) snapshots depicted by the dashed vertical lines in (a), respectively.]
+ TABLE V
+ Evaluation criteria for the three snapshots depicted by the dashed vertical lines in Fig. 5(a).
+ Criteria | x = 0.05     | x = 0.15     | x = 0.2
+ RMSE     | 7.043735e-02 | 7.548533e-02 | 5.179414e-02
+ CC       | 9.604538e-01 | 9.998589e-01 | 9.964517e-01
+ V. Conclusion
+ This paper proposes a novel PINN, called C-PINN, to solve PDEs with little or even no prior information about source terms. In our approach, two fully connected neural networks, NetU and NetG, are proposed: NetU approximates the solution satisfying the PDEs under study, and NetG regularizes the training of NetU. The two networks are integrated into a data-physics-hybrid cost function, and they are optimized and coupled by the proposed hierarchical training strategy. Finally, C-PINN is applied to solve several classical PDEs to demonstrate its performance. Note that C-PINN inherits the advantages of PINNs, such as exploiting sparse measurements and automatic differentiation. C-PINN is designed for the dilemma of dynamical systems governed by equations with unknown forces; thus, it can be further applied to infer the unknown source terms. Meanwhile, C-PINN can be extended to identify operators from sparse measurements.
+ In the future, we will continue to apply C-PINN in various scenarios, such as solving PDEs with unknown structure parameters and high-dimensional PDEs. For the case where the structure of the PDE is totally unknown, regularization methods will be combined with C-PINN to select operators from sparse measurements. Our proposed C-PINN has been shown to solve several classical PDEs successfully. For more complex situations, feature-extraction operations, such as convolution and pooling, will be added to C-PINN.
+ References
+ [1] H. W. Wyld and G. Powell, Mathematical methods for physics. CRC Press, 2020.
+ [2] C. Oszkinat, S. E. Luczak, and I. Rosen, "Uncertainty quantification in estimating blood alcohol concentration from transdermal alcohol level with physics-informed neural networks," IEEE Transactions on Neural Networks and Learning Systems, 2022.
+ [3] J. Tu, C. Liu, and P. Qi, "Physics-informed neural network integrating pointnet-based adaptive refinement for investigating crack propagation in industrial applications," IEEE Transactions on Industrial Informatics, pp. 1–9, 2022.
+ [4] J. Sirignano and K. Spiliopoulos, "DGM: A deep learning algorithm for solving partial differential equations," Journal of Computational Physics, vol. 375, pp. 1339–1364, 2018.
+ [5] S. E. Cohn, "Dynamics of short-term univariate forecast error covariances," Monthly Weather Review, vol. 121, no. 11, pp. 3123–3149, 1993.
+ [6] K. Kashinath, M. Mustafa, A. Albert, J. Wu, C. Jiang, S. Esmaeilzadeh, K. Azizzadenesheli, R. Wang, A. Chattopadhyay, A. Singh et al., "Physics-informed machine learning: case studies for weather and climate modelling," Philosophical Transactions of the Royal Society A, vol. 379, no. 2194, p. 20200093, 2021.
+ [7] G. D. Smith, Numerical solution of partial differential equations: finite difference methods. Oxford University Press, 1985.
+ [8] Z. Li, Z. Qiao, and T. Tang, Numerical solution of differential equations: introduction to finite difference and finite element methods. Cambridge University Press, 2017.
+ [9] G. Dziuk and C. M. Elliott, "Finite element methods for surface PDEs," Acta Numerica, vol. 22, pp. 289–396, 2013.
+ [10] J. Peiró and S. Sherwin, "Finite difference, finite element and finite volume methods for partial differential equations," in Handbook of materials modeling. Springer, 2005, pp. 2415–2446.
+ [11] P. F. Antonietti, A. Cangiani, J. Collis, Z. Dong, E. H. Georgoulis, S. Giani, and P. Houston, "Review of discontinuous Galerkin finite element methods for partial differential equations on complicated domains," in Building bridges: connections and challenges in modern approaches to numerical partial differential equations. Springer, 2016, pp. 281–310.
+ [12] P. G. Ciarlet, The finite element method for elliptic problems. SIAM, 2002.
+ [13] S. Cuomo, V. S. Di Cola, F. Giampaolo, G. Rozza, M. Raissi, and F. Piccialli, "Scientific machine learning through physics-informed neural networks: Where we are and what's next," arXiv preprint arXiv:2201.05624, 2022.
+ [14] N. Zobeiry and K. D. Humfeld, "A physics-informed machine learning approach for solving heat transfer equation in advanced manufacturing and engineering applications," Engineering Applications of Artificial Intelligence, vol. 101, p. 104232, 2021.
+ [15] W. Chen, Q. Wang, J. S. Hesthaven, and C. Zhang, "Physics-informed machine learning for reduced-order modeling of nonlinear problems," Journal of Computational Physics, vol. 446, p. 110666, 2021.
+ [16] M. Ye, J. Shen, G. Lin, T. Xiang, L. Shao, and S. C. Hoi, "Deep learning for person re-identification: A survey and outlook," IEEE Transactions on Pattern Analysis and Machine Intelligence, vol. 44, no. 6, pp. 2872–2893, 2021.
+ [17] D. Nurseitov, K. Bostanbekov, M. Kanatov, A. Alimova, A. Abdallah, and G. Abdimanap, "Classification of handwritten names of cities and handwritten text recognition using various deep learning models," arXiv preprint arXiv:2102.04816, 2021.
+ [18] L. Deng, J. Li, J.-T. Huang, K. Yao, D. Yu, F. Seide, M. Seltzer, G. Zweig, X. He, J. Williams et al., "Recent advances in deep learning for speech research at Microsoft," in 2013 IEEE International Conference on Acoustics, Speech and Signal Processing. IEEE, 2013, pp. 8604–8608.
+ [19] A. J. Meade Jr and A. A. Fernandez, "The numerical solution of linear ordinary differential equations by feedforward neural networks," Mathematical and Computer Modelling, vol. 19, no. 12, pp. 1–25, 1994.
+ [20] I. E. Lagaris, A. Likas, and D. I. Fotiadis, "Artificial neural networks for solving ordinary and partial differential equations," IEEE Transactions on Neural Networks, vol. 9, no. 5, pp. 987–1000, 1998.
+ [21] I. E. Lagaris, A. C. Likas, and D. G. Papageorgiou, "Neural-network methods for boundary value problems with irregular boundaries," IEEE Transactions on Neural Networks, vol. 11, no. 5, pp. 1041–1049, 2000.
+ [22] M. Raissi, P. Perdikaris, and G. E. Karniadakis, "Physics-informed neural networks: A deep learning framework for solving forward and inverse problems involving nonlinear partial differential equations," Journal of Computational Physics, vol. 378, pp. 686–707, 2019.
+ [23] Z. Mao, A. D. Jagtap, and G. E. Karniadakis, "Physics-informed neural networks for high-speed flows," Computer Methods in Applied Mechanics and Engineering, vol. 360, p. 112789, 2020.
+ [24] Q. He, D. Barajas-Solano, G. Tartakovsky, and A. M. Tartakovsky, "Physics-informed neural networks for multiphysics data assimilation with application to subsurface transport," Advances in Water Resources, vol. 141, p. 103610, 2020.
+ [25] M. Raissi, P. Perdikaris, and G. E. Karniadakis, "Physics informed deep learning (part II): Data-driven discovery of nonlinear partial differential equations," arXiv preprint arXiv:1711.10566, 2017.
+ [26] Z. Fang, "A high-efficient hybrid physics-informed neural networks based on convolutional neural network," IEEE Transactions on Neural Networks and Learning Systems, 2021.
+ [27] I. Goodfellow, J. Pouget-Abadie, M. Mirza, B. Xu, D. Warde-Farley, S. Ozair, A. Courville, and Y. Bengio, "Generative adversarial networks," Communications of the ACM, vol. 63, no. 11, pp. 139–144, 2020.
+ [28] L. Yang, D. Zhang, and G. E. Karniadakis, "Physics-informed generative adversarial networks for stochastic differential equations," SIAM Journal on Scientific Computing, vol. 42, no. 1, pp. A292–A317, 2020.
+ [29] L. Yang, X. Meng, and G. E. Karniadakis, "B-PINNs: Bayesian physics-informed neural networks for forward and inverse PDE problems with noisy data," Journal of Computational Physics, vol. 425, p. 109913, 2021.
+ [30] M. Yang and J. T. Foster, "Multi-output physics-informed neural networks for forward and inverse PDE problems with uncertainties," Computer Methods in Applied Mechanics and Engineering, p. 115041, 2022.
+ [31] H. Gao, M. J. Zahr, and J.-X. Wang, "Physics-informed graph neural Galerkin networks: A unified framework for solving PDE-governed forward and inverse problems," Computer Methods in Applied Mechanics and Engineering, vol. 390, p. 114502, 2022.
+ [32] S. Karimpouli and P. Tahmasebi, "Physics informed machine learning: Seismic wave equation," Geoscience Frontiers, vol. 11, no. 6, pp. 1993–2001, 2020.
+ [33] T. Verhulst, D. Judt, C. Lawson, Y. Chung, O. Al-Tayawe, and G. Ward, "Review for state-of-the-art health monitoring technologies on airframe fuel pumps," International Journal of Prognostics and Health Management, vol. 13, no. 1, 2022.
+ [34] K. W. Morton and D. F. Mayers, Numerical solution of partial differential equations: an introduction. Cambridge University Press, 2005.
+ [35] A. G. Baydin, B. A. Pearlmutter, A. A. Radul, and J. M. Siskind, "Automatic differentiation in machine learning: a survey," Journal of Machine Learning Research, vol. 18, pp. 1–43, 2018.
+ [36] C. Zhu, R. H. Byrd, P. Lu, and J. Nocedal, "Algorithm 778: L-BFGS-B: Fortran subroutines for large-scale bound-constrained optimization," ACM Transactions on Mathematical Software (TOMS), vol. 23, no. 4, pp. 550–560, 1997.
+ [37] B. R. Kusse and E. A. Westwig, Mathematical physics: applied mathematics for scientists and engineers. John Wiley & Sons, 2010.
39FAT4oBgHgl3EQflh2J/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
3dFST4oBgHgl3EQfYzh6/content/tmp_files/2301.13789v1.pdf.txt ADDED
@@ -0,0 +1,1063 @@
+ arXiv:2301.13789v1 [math.CO] 31 Jan 2023
+ The Minimum Degree Removal Lemma Thresholds
+ Lior Gishboliner*  Zhihan Jin*  Benny Sudakov*
+ Abstract
+ The graph removal lemma is a fundamental result in extremal graph theory which says that for every fixed graph H and ε > 0, if an n-vertex graph G contains εn² edge-disjoint copies of H then G contains δn^{v(H)} copies of H for some δ = δ(ε, H) > 0. The current proofs of the removal lemma give only very weak bounds on δ(ε, H), and it is also known that δ(ε, H) is not polynomial in ε unless H is bipartite. Recently, Fox and Wigderson initiated the study of minimum degree conditions guaranteeing that δ(ε, H) depends polynomially or linearly on ε. In this paper we answer several questions of Fox and Wigderson on this topic.
+ 1 Introduction
+ The graph removal lemma, first proved by Ruzsa and Szemerédi [23], is a fundamental result in extremal graph theory. It also has important applications to additive combinatorics and property testing. The lemma states that for every fixed graph H and ε > 0, if an n-vertex graph G contains εn² edge-disjoint copies of H then G contains δn^{v(H)} copies of H, where δ = δ(ε, H) > 0. Unfortunately, the current proofs of the graph removal lemma give only very weak bounds on δ = δ(ε, H), and it is a very important problem to understand the dependence of δ on ε. The best known result, due to Fox [11], proves that 1/δ is at most a tower of exponents of height logarithmic in 1/ε. Ideally, one would like to have better bounds on 1/δ, where an optimal bound would be that δ is polynomial in ε. However, it is known [2] that δ(ε, H) is only polynomial in ε if H is bipartite. This situation led Fox and Wigderson [12] to initiate the study of minimum degree conditions which guarantee that δ(ε, H) depends polynomially or linearly on ε. Formally, let δ(ε, H; γ) be the maximum δ ∈ [0, 1] such that if G is an n-vertex graph with minimum degree at least γn and with εn² edge-disjoint copies of H, then G contains δn^{v(H)} copies of H.
+ Definition 1.1. Let H be a graph.
+ 1. The linear removal threshold of H, denoted δ_lin-rem(H), is the infimum γ such that δ(ε, H; γ) depends linearly on ε, i.e. δ(ε, H; γ) ≥ με for some μ = μ(γ) > 0 and all ε > 0.
+ 2. The polynomial removal threshold of H, denoted δ_poly-rem(H), is the infimum γ such that δ(ε, H; γ) depends polynomially on ε, i.e. δ(ε, H; γ) ≥ με^{1/μ} for some μ = μ(γ) > 0 and all ε > 0.
+ Trivially, δ_lin-rem(H) ≥ δ_poly-rem(H). Fox and Wigderson [12] initiated the study of δ_lin-rem(H) and δ_poly-rem(H), and proved that δ_lin-rem(K_r) = δ_poly-rem(K_r) = (2r−5)/(2r−3) for every r ≥ 3, where K_r is the clique on r vertices. They further asked to determine the removal lemma thresholds of odd cycles. Here we completely resolve this question. The following theorem handles the polynomial removal threshold.
+ Theorem 1.2. δ_poly-rem(C_{2k+1}) = 1/(2k+1).
+ Theorem 1.2 also answers another question of Fox and Wigderson [12], of whether δ_lin-rem(H) and δ_poly-rem(H) can only attain finitely many values on r-chromatic graphs H for a given r ≥ 3. Theorem 1.2 shows that δ_poly-rem(H) attains infinitely many values for 3-chromatic graphs. In contrast, δ_lin-rem(H) attains only three possible values for 3-chromatic graphs. Indeed, the following theorem determines δ_lin-rem(H) for every 3-chromatic H. An edge xy of H is called critical if χ(H − xy) < χ(H).
+ * Department of Mathematics, ETH, Zürich, Switzerland. Research supported in part by SNSF grant 200021_196965. Email: {lior.gishboliner, zhihan.jin, benjamin.sudakov}@math.ethz.ch.
+ Theorem 1.3. For a graph H with χ(H) = 3, it holds that
+ δ_lin-rem(H) = 1/2 if H has no critical edge; 1/3 if H has a critical edge and contains a triangle; 1/4 if H has a critical edge and odd-girth(H) ≥ 5.
+ Theorems 1.2 and 1.3 show a separation between the polynomial and linear removal thresholds, giving a sequence of graphs (i.e. C_5, C_7, . . . ) where the polynomial threshold tends to 0 while the linear threshold is constant 1/4.
+ The parameters δ_poly-rem and δ_lin-rem are related to two other well-studied minimum degree thresholds: the chromatic threshold and the homomorphism threshold. The chromatic threshold of a graph H is the infimum γ such that every n-vertex H-free graph G with δ(G) ≥ γn has bounded chromatic number, i.e., there exists C = C(γ) such that χ(G) ≤ C. The study of the chromatic threshold originates in the work of Erdős and Simonovits [10] from the '70s. Following multiple works [4, 15, 16, 7, 5, 25, 26, 19, 6, 14, 20], the chromatic threshold of every graph was determined by Allen et al. [1].
+ Moving on to the homomorphism threshold, we define it more generally for families of graphs. The homomorphism threshold of a graph family H, denoted δ_hom(H), is the infimum γ for which there exists an H-free graph F = F(γ) such that every n-vertex H-free graph G with δ(G) ≥ γn is homomorphic to F. When H = {H}, we write δ_hom(H). This parameter was widely studied in recent years [18, 22, 17, 8, 24]. It turns out that δ_hom is closely related to δ_poly-rem(H), as the following theorem shows. For a graph H, let I_H denote the set of all minimal (with respect to inclusion) graphs H′ such that H is homomorphic to H′.
+ Theorem 1.4. For every graph H, δ_poly-rem(H) ≤ δ_hom(I_H).
+ Note that I_{C_{2k+1}} = {C_3, . . . , C_{2k+1}}. Using this, the upper bound in Theorem 1.2 follows immediately by combining Theorem 1.4 with the result of Ebsen and Schacht [8] that δ_hom({C_3, . . . , C_{2k+1}}) = 1/(2k+1). The lower bound in Theorem 1.2 was established in [12]; for completeness, we sketch the proof in Section 3.
+ The rest of this short paper is organized as follows. Section 2 contains some preliminary lemmas. In Section 3 we prove the lower bounds in Theorems 1.2 and 1.3. Section 4 gives the proof of Theorem 1.4, and Section 5 gives the proof of the upper bounds in Theorem 1.3. In the last section we discuss further related problems.
+ 2 Preliminaries
+ Throughout this paper, we always consider labeled copies of some fixed graph H and write "copy of H" for simplicity. We use δ(G) for the minimum degree of G, and write H → F to denote that there is a homomorphism from H to F. For a graph H on [h] and integers s_1, s_2, . . . , s_h > 0, we denote by H[s_1, . . . , s_h] the blow-up of H where each vertex i ∈ V(H) is replaced by a set S_i of size s_i (and edges are replaced with complete bipartite graphs). The following lemma is standard.
+ Lemma 2.1. Let H be a fixed graph on vertex set [h] and let s_1, s_2, . . . , s_h ∈ N. There exists a constant c = c(H, s_1, . . . , s_h) > 0 such that the following holds. Let G be an n-vertex graph and V_1, . . . , V_h ⊆ V(G). Suppose that G contains at least ρn^h copies of H mapping i to V_i for all i ∈ [h]. Then G contains at least (ρ^c/c) · n^{s_1+···+s_h} copies of H[s_1, . . . , s_h] mapping S_i to V_i for all i ∈ [h].
+ Note that the sets V_1, . . . , V_h in Lemma 2.1 do not have to be disjoint. The proof of Lemma 2.1 works by defining an auxiliary h-uniform hypergraph G whose hyperedges correspond to the copies of H in which vertex i is mapped to V_i. By assumption, G has at least ρn^h edges. By the hypergraph generalization of the Kővári–Sós–Turán theorem, see [9], G contains poly(ρ)n^{s_1+···+s_h} copies of K^{(h)}_{s_1,...,s_h}, the complete h-partite hypergraph with parts of size s_1, . . . , s_h. Each copy of K^{(h)}_{s_1,...,s_h} gives a copy of H[s_1, . . . , s_h] mapping S_i to V_i.
+ Fox and Wigderson [12, Proposition 4.1] proved the following useful fact.
+ Lemma 2.2. If H → F and F is a subgraph of H, then δ_poly-rem(H) = δ_poly-rem(F).
+ The following lemma is an asymmetric removal-type statement for odd cycles, which gives polynomial bounds. It may be of independent interest. A similar result has appeared very recently in [13].
+ Lemma 2.3. For 1 ≤ ℓ < k, there exists a constant c = c(k) > 0 such that if an n-vertex graph G has εn² edge-disjoint copies of C_{2ℓ+1}, then it has at least cε^{1/c}n^{2k+1} copies of C_{2k+1}.
+ Proof. Let C be a collection of εn² edge-disjoint copies of C_{2ℓ+1} in G. There exists a collection C′ ⊆ C such that |C′| ≥ εn²/2 and each vertex v ∈ V(G) belongs to either 0 or at least εn/2 of the cycles in C′. Indeed, to obtain C′, we repeatedly delete from C all cycles containing a vertex v which belongs to at least one but fewer than εn/2 of the cycles in C (without changing the graph). The set of cycles left at the end is C′. In this process, we delete at most εn²/2 cycles altogether (because the process lasts for at most n steps); hence |C′| ≥ εn²/2. Let V be the set of vertices contained in at least εn/2 cycles from C′, so |V| ≥ εn/2. With a slight abuse of notation, we may replace G with G[V], C with C′ and ε/2 with ε, and denote |V| by n. Hence, from now on, we assume that each vertex v ∈ V(G) is contained in at least εn of the cycles in C. This implies that |N(v)| ≥ 2εn for every v ∈ V(G).
+ Fix any v_0 ∈ V(G) and let C(v_0) be the set of cycles C ∈ C such that C ∩ N(v_0) ≠ ∅ and v_0 ∉ C. The number of cycles C ∈ C intersecting N(v_0) is at least |N(v_0)| · εn/(2ℓ+1) ≥ 2ε²n²/(2ℓ+1), and the number of cycles containing v_0 is at most n. Hence, |C(v_0)| ≥ 2ε²n²/(2ℓ+1) − n ≥ ε²n²/(ℓ+1). Take a random partition V_0, V_1, . . . , V_ℓ of V(G) \ {v_0}, where each vertex is put in one of the parts uniformly and independently. For a cycle (x_1, . . . , x_{2ℓ+1}) ∈ C(v_0) with x_{ℓ+1} ∈ N(v_0), say that (x_1, . . . , x_{2ℓ+1}) is good if x_{ℓ+1} ∈ V_0 and x_{ℓ+1−i}, x_{ℓ+1+i} ∈ V_i for 1 ≤ i ≤ ℓ (so in particular x_1, x_{2ℓ+1} ∈ V_ℓ). The probability that (x_1, . . . , x_{2ℓ+1}) is good is 1/(ℓ+1)^{2ℓ+1}, so there is a collection of good cycles C′(v_0) ⊆ C(v_0) of size |C′(v_0)| ≥ |C(v_0)|/(ℓ+1)^{2ℓ+1} ≥ ε²n²/(ℓ+1)^{2ℓ+2}. Put γ := ε²/(ℓ+1)^{2ℓ+2}. By the same argument as above, there is a collection C′′(v_0) ⊆ C′(v_0) with |C′′(v_0)| ≥ γn²/2 such that each vertex is contained in either 0 or at least γn/2 cycles from C′′(v_0). Let W be the set of vertices contained in at least γn/2 cycles from C′′(v_0). Note that W ∩ V_0 ⊆ N(v_0) by definition. Also, each vertex in W ∩ V_ℓ has at least γn/2 neighbors in W ∩ V_ℓ, and for each 1 ≤ i ≤ ℓ, each vertex in W ∩ V_i has at least γn/2 neighbors in W ∩ V_{i−1}. It follows that W ∩ V_ℓ contains at least (1/2)|W ∩ V_ℓ| · ∏_{i=0}^{2k−2ℓ−2} (γn/2 − i) = poly(γ)n^{2k−2ℓ} paths of length 2k − 2ℓ − 1.
+ We now construct a collection of copies of C_{2k+1} as follows. Choose a path y_{ℓ+1}, y_{ℓ+2}, . . . , y_{2k−ℓ} of length 2k − 2ℓ − 1 in W ∩ V_ℓ. For each i = ℓ, . . . , 1, take a neighbor y_i ∈ W ∩ V_{i−1} of y_{i+1} and a neighbor y_{2k−i+1} ∈ W ∩ V_{i−1} of y_{2k−i}, such that the vertices y_1, . . . , y_{2k} are all different. Then y_1, . . . , y_{2k} is a path and y_1, y_{2k} ∈ W ∩ V_0 ⊆ N(v_0), so v_0, y_1, . . . , y_{2k} is a copy of C_{2k+1}. The number of choices for the path y_{ℓ+1}, y_{ℓ+2}, . . . , y_{2k−ℓ} is poly(γ)n^{2k−2ℓ} and the number of choices for each vertex y_i, y_{2k−i+1} ∈ V_{i−1} (i = ℓ, . . . , 1) is at least γn/2. Hence, the total number of choices for y_1, . . . , y_{2k} is poly(γ)n^{2k}. As there are n choices for v_0, we get a total of poly(γ)n^{2k+1} = poly_k(ε)n^{2k+1} copies of C_{2k+1}, as required.
+ 3 Lower bounds
+ Here we prove the lower bounds in Theorems 1.2 and 1.3. The lower bound in Theorem 1.2 was proved in [12, Theorem 4.3]. For completeness, we include a sketch of the proof:
+ Lemma 3.1. δ_poly-rem(C_{2k+1}) ≥ 1/(2k+1).
+ Proof. Fix an arbitrary α > 0. In [2] it was proved that for every ε, there exists a (2k+1)-partite graph with parts V_1, . . . , V_{2k+1} of size αn/(2k+1) each, with εn² edge-disjoint copies of C_{2k+1}, but with only ε^{ω(1)}n^{2k+1} copies of C_{2k+1} in total (where the ω(1) term may depend on α). Add sets U_1, . . . , U_{2k+1} of size (1−α)n/(2k+1) each, and add the complete bipartite graphs (U_i, V_i), 1 ≤ i ≤ 2k+1, and (U_i, U_{i+1}), 1 ≤ i ≤ 2k. See Figure 1. It is easy to see that this graph has minimum degree (1−α)n/(2k+1), and every copy of C_{2k+1} is contained in V_1 ∪ · · · ∪ V_{2k+1}. Letting α → 0, we get that δ_poly-rem(C_{2k+1}) ≥ 1/(2k+1).
+ By combining the fact that δ_poly-rem(C_3) = 1/3 with Lemma 2.2 (with F = C_3), we get that δ_lin-rem(H) ≥ δ_poly-rem(H) = 1/3 for every 3-chromatic graph H containing a triangle. This proves the lower bound in the second case of Theorem 1.3. Now we prove the lower bounds in the other two cases. We prove a more general statement for r-chromatic graphs.
+ [Figure 1: Proof of Lemma 3.1 for C_5, with parts V_1, . . . , V_5 and U_1, . . . , U_5. Heavy edges indicate complete bipartite graphs while dashed edges form the Ruzsa–Szemerédi construction for C_5 (see [2]).]
+ Lemma 3.2. Let H be a graph with χ(H) = r ≥ 3. Then (3r−8)/(3r−5) ≤ δ_lin-rem(H) ≤ (r−2)/(r−1). Moreover, δ_lin-rem(H) = (r−2)/(r−1) if H contains no critical edge.
+ Proof. Denote h = |V(H)|. The bound δ_lin-rem(H) ≤ (r−2)/(r−1) holds for every r-chromatic graph H; this follows from the Erdős–Simonovits supersaturation theorem, see [12, Section 4.1] for the details.
+ Suppose now that H contains no critical edge, and let us show that δ_lin-rem(H) ≥ (r−2)/(r−1). To this end, we construct, for every small enough ε and infinitely many n, an n-vertex graph G with δ(G) ≥ (r−2)n/(r−1), such that G has at most O(ε²n^h) copies of H, but Ω(εn²) edges must be deleted to turn G into an H-free graph. Let T(n, r−1) be the Turán graph, i.e. the complete (r−1)-partite graph with balanced parts V_1, . . . , V_{r−1}. Add an εn-regular graph inside V_1 and let the resulting graph be G. We first claim that G contains O(ε²n^h) copies of H. As H contains no critical edge and χ(H) = r, every copy of H in G contains two edges e and e′ inside V_1. If e and e′ are disjoint, then there are at most n²(εn)² = ε²n⁴ choices for e and e′ and then at most n^{h−4} choices for the other h − 4 vertices of H. Therefore, there are at most ε²n^h such H-copies. And if e and e′ intersect, then there are at most n(εn)² = ε²n³ choices for e and e′ and then at most n^{h−3} choices for the remaining vertices, again giving at most ε²n^h such H-copies. So G indeed has O(ε²n^h) copies of H.
+ On the other hand, we claim that one must delete Ω(εn²) edges to destroy all H-copies in G. Observe that G has at least (1/2)|V_1| · εn · |V_2| · · · · · |V_{r−1}| = Ω_r(εn^r) copies of K_r, and every edge participates in at most n^{r−2} of these copies. Thus, deleting cεn² edges can destroy at most cεn^r copies of K_r. If c is a small enough constant (depending on r), then after deleting any cεn² edges, there are still Ω(εn^r) copies of K_r. Then, by Lemma 2.1, the remaining graph contains K_r[h], the h-blowup of K_r, and hence H. This completes the proof that δ_lin-rem(H) ≥ (r−2)/(r−1).
+ We now prove that δ_lin-rem(H) ≥ (3r−8)/(3r−5) for every r-chromatic graph H. It suffices to construct, for every small enough ε and infinitely many n, an n-vertex graph G with δ(G) ≥ (3r−8)n/(3r−5), such that G has at most O(ε²n^h) copies of H but at least Ω(εn²) edges must be deleted to turn G into an H-free graph. The vertex set of G consists of r + 1 disjoint sets V_0, V_1, V_2, . . . , V_r, where |V_i| = n/(3r−5) for i = 0, 1, 2, 3 and |V_i| = 3n/(3r−5) for i = 4, 5, . . . , r. Put complete bipartite graphs between V_0 and V_1, between V_0 ∪ V_1 and V_4 ∪ · · · ∪ V_r, and between V_i and V_j for all 2 ≤ i < j ≤ r. Put εn-regular bipartite graphs between V_1 and V_2, and between V_1 and V_3. The resulting graph is G (see Figure 2). It is easy to check that δ(G) ≥ (3r−8)n/(3r−5). Indeed, let 0 ≤ i ≤ r and v ∈ V_i. If 4 ≤ i ≤ r then v is connected to all vertices except for V_i; if i ∈ {2, 3} then v is connected to all vertices except V_0 ∪ V_1 ∪ V_i; and if i ∈ {0, 1} then v is connected to all vertices except V_2 ∪ V_3 ∪ V_i. In any case, the neighborhood of v misses at most 3n/(3r−5) vertices.
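+ (For concreteness, the degree bound follows from the short calculation below; this worked check is an addition for the reader, not part of the original proof.)
+ ```latex
+ \deg(v) \;\ge\; n - \frac{3n}{3r-5}
+        \;=\; \frac{(3r-5)n - 3n}{3r-5}
+        \;=\; \frac{3r-8}{3r-5}\, n .
+ ```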
+ We claim that G has at most O(ε²n^h) copies of H. Indeed, observe that if we delete all edges between V_1 and V_2 then the remaining graph is (r−1)-colorable with coloring V_1 ∪ V_2, V_0 ∪ V_3, V_4, . . . , V_r. Hence, every copy of H must contain an edge e between V_1 and V_2. Similarly, every copy of H must contain an edge e′ between V_1 and V_3. If e, e′ are disjoint then there are at most n²(εn)² = ε²n⁴ ways to choose e, e′ and then at most n^{h−4} ways to choose the remaining vertices of H. And if e and e′ intersect then there are at most n(εn)² = ε²n³ ways to choose e, e′ and at most n^{h−3} for the remaining h − 3 vertices of H. In both cases, the number of H-copies is at most ε²n^h, as required.
+ [Figure 2: Proof of Lemma 3.2, r = 3 (left) and r = 4 (right), with parts V_0, . . . , V_r. Heavy edges indicate complete bipartite graphs while dashed edges indicate εn-regular bipartite graphs.]
+ Now we show that one must delete Ω(εn²) edges to destroy all copies of H in G. Observe that G has |V_1| · (εn)² · |V_4| · · · · · |V_r| = Ω(ε²n^r) copies of K_r between the sets V_1, . . . , V_r. We claim that every edge f participates in at most εn^{r−2} of these r-cliques. Indeed, by the same argument as above, every copy of K_r containing f must contain an edge e from E(V_1, V_2) and an edge e′ from E(V_1, V_3). Suppose without loss of generality that e ≠ f (the case e′ ≠ f is symmetric). In the case f ∩ e = ∅, there are at most n · εn = εn² choices for e and at most n^{r−4} choices for the remaining vertices of K_r, giving at most εn^{r−2} copies of K_r containing f. And if f and e intersect, then there are at most εn choices for e and at most n^{r−3} choices for the remaining r − 3 vertices, giving again εn^{r−2}.
+ We see that deleting cεn² edges of G can destroy at most cε²n^r copies of K_r. Hence, if c is a small enough constant, then after deleting any cεn² edges there are still Ω(ε²n^r) copies of K_r left. By Lemma 2.1, the remaining graph contains a copy of K_r[h] and hence H. This completes the proof.
+ 4 Polynomial removal thresholds: Proof of Theorem 1.4
+ We say that an n-vertex graph G is ε-far from a graph property P (e.g. being H-free for a given graph H, or being homomorphic to a given graph F) if one must delete at least εn² edges to make G satisfy P. Trivially, if G has εn² edge-disjoint copies of H, then it is ε-far from being H-free. We need the following result from [21].
+ Theorem 4.1. For every graph F on f vertices and for every ε > 0, there is q = q_F(ε) = poly(f/ε), such that the following holds. If a graph G is ε-far from being homomorphic to F, then for a sample of q vertices x_1, . . . , x_q ∈ V(G), taken uniformly with repetitions, it holds that G[{x_1, . . . , x_q}] is not homomorphic to F with probability at least 2/3.
+ Theorem 4.1 is proved in Section 2 of [21]. In fact, [21] proves a more general result on property testing of the so-called 0/1-partition properties. Such a property is given by an integer f and a function d : [f]² → {0, 1, ⊥}, and a graph G satisfies the property if it has a partition V(G) = V_1 ∪ · · · ∪ V_f such that for every 1 ≤ i, j ≤ f (possibly i = j), it holds that (V_i, V_j) is complete if d(i, j) = 1 and (V_i, V_j) is empty if d(i, j) = 0 (if d(i, j) = ⊥ then there are no restrictions). One can express the property of having a homomorphism into F in this language, simply by setting d(i, j) = 0 for i = j and ij ∉ E(F). In [21], the class of these partition properties is denoted GPP_{0,1}, and every such property is shown to be testable by sampling poly(f/ε) vertices. This implies Theorem 4.1.
+ Proof of Theorem 1.4. Recall that I_H is the set of minimal graphs H′ (with respect to inclusion) such that H is homomorphic to H′. For convenience, put δ := δ_hom(I_H). Our goal is to show that δ_poly-rem(H) ≤ δ + α for every α > 0. So fix α > 0 and let G be a graph with minimum degree δ(G) ≥ (δ + α)n and with εn² edge-disjoint copies of H. By the definition of the homomorphism threshold, there is an I_H-free graph F (depending only on I_H and α) such that if a graph G_0 is I_H-free and has minimum degree at least (δ + α/2) · |V(G_0)|, then G_0 is homomorphic to F. Observe that if a graph G_0 is homomorphic to F then G_0 is H-free, because F is free of any homomorphic image of H. It follows that G is ε-far from being homomorphic to F, because G is ε-far from being H-free. Now we apply Theorem 4.1. Let q = q_F(ε) be given by Theorem 4.1. We assume that q ≫ log(1/α)/α² and n ≫ q² without loss of generality. Sample q vertices x_1, . . . , x_q ∈ V(G) with repetition and let X = {x_1, . . . , x_q}. By Theorem 4.1, G[X] is not homomorphic to F with probability at least 2/3. As n ≫ q², the vertices x_1, . . . , x_q are pairwise distinct with probability at least 0.99. Also, for every i ∈ [q], the number of indices j ∈ [q] \ {i} with x_ix_j ∈ E(G) dominates a binomial distribution B(q − 1, δ(G)/n). By the Chernoff bound (see e.g. [3, Appendix A]) and as δ(G) ≥ (δ + α)n, the number of such indices is at least (δ + α/2)q with probability 1 − e^{−Ω(qα²)}. Taking the union bound over i ∈ [q], we get that δ(G[X]) ≥ (δ + α/2)|X| with probability at least 1 − qe^{−Ω(qα²)} ≥ 0.9, as q ≫ log(1/α)/α².
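+ (The multiplicative Chernoff bound invoked here, stated in a standard form for the reader's convenience; [3, Appendix A] may phrase it differently:)
+ ```latex
+ \Pr\bigl[\, X \le (1-\theta)\, mp \,\bigr] \;\le\; e^{-\theta^{2} mp / 2}
+ \qquad \text{for } X \sim B(m,p),\ 0 < \theta < 1 .
+ ```
+ Applied with m = q − 1 and p = δ(G)/n ≥ δ + α, a suitable constant θ = θ(α) yields the stated 1 − e^{−Ω(qα²)} probability.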
+ Hence, with probability at least 1/2 it holds that δ(G[X]) ≥ (δ + α/2)|X| and G[X] is not homomorphic to F. If this happens, then G[X] is not I_H-free (by the choice of F), hence G[X] contains a copy of some H′ ∈ I_H. By averaging, there is H′ ∈ I_H such that G[X] contains a copy of H′ with probability at least 1/(2|I_H|). Put k = |V(H′)| and let M be the number of copies of H′ in G. The probability that G[X] contains a copy of H′ is at most M(q/n)^k. Using the fact that q = poly_{H,α}(1/ε), we conclude that M ≥ (1/(2|I_H|)) · (n/q)^k ≥ poly_{H,α}(ε)n^k. As H → H′, there exists a blow-up H′′ of H′ such that H′′ has the same number of vertices as H and H ⊆ H′′. By Lemma 2.1 applied to H′ with V_i = V(G) for all i, there exist poly_{H,α}(ε)n^{v(H′′)} copies of H′′ in G, and thus poly_{H,α}(ε)n^{v(H)} copies of H. This completes the proof.
+ 5 Linear removal thresholds: Proof of Theorem 1.3
+ Here we prove the upper bounds in Theorem 1.3; the lower bounds were proved in Section 3. The first case of Theorem 1.3 follows from Lemma 3.2, so it remains to prove the other two cases. We begin with some preparation. For disjoint sets A_1, . . . , A_m, we write ⋃_{i∈[m]} A_i × A_{i+1} to denote all pairs of vertices which have one endpoint in A_i and one in A_{i+1} for some 1 ≤ i ≤ m, with subscripts always taken modulo m. So a graph G has a homomorphism to the cycle C_m if and only if there is a partition V(G) = A_1 ∪ · · · ∪ A_m with E(G) ⊆ ⋃_{i∈[m]} A_i × A_{i+1}.
+ Lemma 5.1. Suppose H is a graph such that χ(H) = 3, H contains a critical edge xy, and odd-girth(H) ≥ 2k + 1. Then,
+ • there is a partition V(H) = A_1 ∪̇ A_2 ∪̇ A_3 ∪̇ B such that A_1 = {x}, A_2 = {y} and E(H) ⊆ (A_3 × B) ∪ (⋃_{i∈[3]} A_i × A_{i+1});
+ • if k ≥ 2, there is a partition V(H) = A_1 ∪̇ A_2 ∪̇ · · · ∪̇ A_{2k+1} such that A_1 = {x}, A_2 = {y} and E(H) ⊆ ⋃_{i∈[2k+1]} A_i × A_{i+1}. In particular, H is homomorphic to C_{2k+1}.
+ Proof. Write H′ = H − xy, so H′ is bipartite. Let V(H) = V(H′) = L ∪̇ R be a bipartition of H′. As χ(H) = 3, x and y must both lie on the same side of the bipartition. Without loss of generality, assume that x, y ∈ L. For the first item, set A_1 = {x}, A_2 = {y}, A_3 = R and B = L \ {x, y}. Then every edge of H goes between B and A_3 or between two of the sets A_1, A_2, A_3, as required.
+ Suppose now that k ≥ 2, i.e. odd-girth(H) = 2k + 1 ≥ 5. For 1 ≤ i ≤ k, let X_i be the set of vertices at distance (i − 1) from x in H′, and let Y_i be the set of vertices at distance (i − 1) from y in H′. Note that X_1 = {x} and Y_1 = {y}. Also, X_i, Y_i lie in L if i is odd and in R if i is even. Write
+ L′ := L \ ⋃_{i=1}^{k} (X_i ∪ Y_i),  R′ := R \ ⋃_{i=1}^{k} (X_i ∪ Y_i).
+ We first claim that {X_1, . . . , X_k, Y_1, . . . , Y_k, L′, R′} forms a partition of V(H). The sets X_1, . . . , X_k are clearly pairwise disjoint, and so are Y_1, . . . , Y_k. Also, all of these sets are disjoint from L′, R′ by definition. So we only need to check that X_i and Y_j are disjoint for every pair 1 ≤ i, j ≤ k. Suppose for contradiction that there exists u ∈ X_i ∩ Y_j for some 1 ≤ i, j ≤ k. Then i ≡ j (mod 2), because otherwise X_i, Y_j are contained in different parts of the bipartition L, R. By the definition of X_i and Y_j, H′ has a path x = x_1, x_2, . . . , x_i = u and a path y = y_1, y_2, . . . , y_j = u. Then x = x_1, x_2, . . . , x_i = u = y_j, y_{j−1}, . . . , y_1, y, x forms a closed walk of length i + j − 1, which is odd as i ≡ j (mod 2). Hence, odd-girth(H) ≤ 2k − 1, contradicting our assumption.
+ By definition, there are no edges between X_i and X_j for j − i ≥ 2, and similarly for Y_i, Y_j. Also, there are no edges between L′ ∪ R′ and ⋃_{i=1}^{k−1} (X_i ∪ Y_i), because the vertices in L′ ∪ R′ are at distance more than k from x, y. Moreover, if k is even then there are no edges between X_k ∪ Y_k and R′, and if k is odd then there are no edges between X_k ∪ Y_k and L′. Next, we show that there are no edges between X_i and Y_j for any 1 ≤ i, j ≤ k except (i, j) = (1, 1). Indeed, if i = j then e(X_i, Y_j) = 0 because X_i, Y_j are on the same side of the bipartition L, R. So suppose that i ≠ j, say i < j, and assume by contradiction that there is an edge uv with u ∈ X_i, v ∈ Y_j. Then v is at distance at most i + 1 ≤ k from x, implying that Y_j intersects X_1 ∪ · · · ∪ X_{i+1}, a contradiction.
+ [Figure 3: Proof of Lemma 5.1, k = 2 (left, with parts x, y, X_2, Y_2, L′, R′) and k = 3 (right, with parts x, y, X_2, Y_2, X_3, Y_3, R′, L′). Edges indicate bipartite graphs where edges can be present.]
+ Finally, we define the partition A1, . . . , A2k+1 that satisfies the assertion of the second item. If k is even
405
+ then take A1, . . . , A2k+1 to be X1, Y1, . . . , Yk−1, Yk∪R′, L′, Xk, . . . , X2, and if k is odd then take A1, . . . , A2k+1
406
+ to be X1, Y1, . . . , Yk−1, Yk ∪ L′, R′, Xk, . . . , X2. See Figure 3 for an illustration. By the above, in both cases
407
+ it holds that E(H) ⊆ �
408
+ i∈[2k+1] Ai × Ai+1, as required.
409
+ For vertex u ∈ V (G), denote by NG(u) the neighborhood of u and let degG(u) = |NG(u)|. For vertices
410
+ u, v ∈ V (G), denote by NG(u, v) the common neighborhood of u, v and let degG(u, v) = |NG(u, v)|.
411
+ Lemma 5.2. Let H be a graph on h vertices such that χ(H) = 3 and H contains a critical edge xy. Let G
412
+ be a graph on n vertices with δ(G) ≥ αn. Let ab ∈ E(G) such that degG(a, b) ≥ αn. Then, there are at least
413
+ poly(α)nh−2 copies of H in G mapping xy ∈ E(H) to ab ∈ E(G).
414
+ Proof. By the first item of Lemma 5.1, there is a partition V (H) = A1 ·∪ A2 ·∪ A3 ·∪ B such that A1 =
415
+ {x}, A2 = {y} and E(H) ⊆ (A3 × B) ∪ �
416
+ i∈[3] Ai × Ai+1. Let s = |A3| and t = |B|. Each u ∈ NG(a, b) has at
417
+ least αn − 2 ≥ αn
418
+ 2 neighbors not equal to a, b. Hence, there are at least 1
419
+ 2 · |NG(a, b)| · αn
420
+ 2 ≥ α2n2
421
+ 4
422
+ edges uv
423
+ with u ∈ NG(a, b) and v /∈ {a, b}. Applying Lemma 2.1 with H = K2, V1 = NG(a, b) and V2 = V (G)\{a, b},
424
+ we see that there are poly(α)ns+t pairs of disjoint sets (S, T ) such that |S| = s, |T | = t, S ⊆ NG(a, b),
425
+ a, b /∈ T , and S, T form a complete bipartite graph in G. Given any such pair, it is safe to map x to a, y to
426
+ b, A3 to S and B to T to obtain an H-copy. Hence, G contains at least poly(α)ns+t = poly(α)nh−2 copies
427
+ of H mapping xy to ab.
428
+ Lemma 5.3. Let H be a graph on h vertices such that χ(H) = 3, H contains a critical edge xy, and
429
+ odd-girth(H) ≥ 5. Let G be a graph on n vertices, let ab ∈ E(G), and suppose that there exists A ⊂ NG(a)
430
+ and B ⊂ NG(b) such that |A| , |B| ≥ αn and |NG(a′, b′)| ≥ αn for all distinct a′ ∈ A and b′ ∈ B. Then there
431
+ are at least poly(α)nh−2 copies of H in G mapping xy ∈ E(H) to ab ∈ E(G).
432
+ Proof. By Lemma 5.1 (using odd-girth(H) ≥ 5), there exists a partition V (H) = A1 ·∪ · · · ·∪ A5 such that
433
+ A1 = {x}, A2 = {y}, and E(H) ⊆ �
434
+ i∈[5] Ai × Ai+1. Put si = |Ai| for i ∈ [5].
435
+ There are at least (|A||B| − |A|)/2 ≥ α2n2/3 pairs {a′, b′} of distinct vertices with a′ ∈ A, b′ ∈ B
436
+ (the factor of 2 is due to the fact that each pair in A ∩ B is counted twice).
437
+ Each such pair a′, b′ has
438
+ at least αn − 2 ≥ αn/2 common neighbors c′ /∈ {a, b}, by assumption.
439
+ Therefore, there are at least
440
+ α2n2
441
+ 3
442
+ · αn
443
+ 2
444
+ = α3n3
445
+ 6
446
+ triples (a′, b′, c′) such that a′ ∈ A, b′ ∈ B, and c′ ̸= a, b is a common neighbor of a′, b′.
447
+ By Lemma 2.1 with H = K2,1 and V1 = A, V2 = B, V3 = V (G)\{a, b}, there are at least poly(α)ns3+s4+s5
448
+ corresponding copies of K2,1[s3, s5, s4], i.e., triples of disjoint sets (R, S, T ) such that R ⊆ A, S ⊆ B, a, b /∈ T ,
449
+ |R| = s5, |S| = s3, |T | = s4, and (R, T ) and (S, T ) form complete bipartite graphs in G. Given any such
450
+ 7
451
+
452
+ triple, we can safely map A1 = {x} to a, A2 = {y} to b, A5 to R, A3 to S, and A4 to T to obtain a copy of
453
+ H. Thus, there are at least poly(α)ns3+s4+s5 = poly(α)nh−2 copies of H mapping xy to ab.
454
+ In the following theorem we prove the upper bound in the second case of Theorem 1.3.
455
+ Theorem 5.4. Let H be a graph such that χ(H) = 3, H has a critical edge xy, and H contains a triangle.
456
+ Then, δlin-rem(H) ≤ 1
457
+ 3.
458
+ Proof. Write h = v(H). Fix an arbitrary α > 0, and let G be an n-vertex graph with minimum degree
459
+ δ(G) ≥ ( 1
460
+ 3 + α)n and with a collection C = {H1, . . . , Hm} of m := εn2 edge-disjoint copies of H.
461
+ For
462
+ each i = 1, . . . , m, there exist u, v, w ∈ V (Hi) forming a triangle (because H contains a triangle).
463
+ As
464
+ degG(u) + degG(v) + degG(w) ≥ 3δ(G) ≥ (1 + 3α)n, two of u, v, w have at least αn common neighbors. We
465
+ denote these two vertices by ai and bi. By Lemma 5.2, G has at least poly(α)nh−2 copies of H which map
466
+ xy to aibi. The edges a1b1, . . . , ambm are distinct because H1, . . . , Hm are edge-disjoint. Hence, summing
467
+ over all i = 1, . . . , m, we see that G contains at least εn2 · poly(α)nh−2 = poly(α)εnh copies of H. This
468
+ proves that δlin-rem(H) ≤ 1
469
+ 3 + α, and taking α → 0 gives δlin-rem(H) ≤ 1
470
+ 3.
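+ The step asserting that two of u, v, w have at least αn common neighbors is a standard inclusion–exclusion argument; spelled out for completeness (this is implicit in the proof, not an extra assumption): if all three pairwise common neighborhoods had size less than αn, then
+ ```latex
+ n \;\ge\; |N(u)\cup N(v)\cup N(w)|
+ \;\ge\; \deg(u)+\deg(v)+\deg(w)-\sum_{\{s,t\}\subseteq\{u,v,w\}}|N(s)\cap N(t)|
+ \;>\; (1+3\alpha)n-3\alpha n \;=\; n,
+ ```
+ a contradiction.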
+ In what follows, we need the following very well-known observation, originating in the work of Andrásfai, Erdős and Sós, see [4, Remark 1.6].
+ Lemma 5.5. If δ(G) > 2n/(2k + 1) and odd-girth(G) ≥ 2k + 1 for k ≥ 2, then G is bipartite.
+ Proof. Suppose by contradiction that G is not bipartite and take a shortest odd cycle C in G, so |C| ≥ 2k + 1. As Σ_{x∈C} deg(x) ≥ (2k + 1)δ(G) > 2n, there exists a vertex v ∉ C with at least 3 neighbors on C. Then there are two neighbors x, y ∈ C of v such that the distance of x, y along C is not equal to 2. Then by taking the odd path between x, y along C and adding the edges vx, vy, we get a shorter odd cycle, a contradiction.
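+ The averaging step above can be made explicit (a routine computation, recorded here for convenience): if every vertex of G had at most 2 neighbors on C, then
+ ```latex
+ \sum_{x\in C}\deg(x) \;=\; \sum_{v\in V(G)} |N(v)\cap C| \;\le\; 2n,
+ ```
+ contradicting Σ_{x∈C} deg(x) ≥ (2k + 1)δ(G) > 2n. Moreover, a vertex of C itself has exactly 2 neighbors on C, since a shortest odd cycle has no chords; hence the vertex with 3 neighbors on C lies outside C.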
+ We will also use the following result of Letzter and Snyder, see [17, Corollary 32].
+ Theorem 5.6 ([17]). Let G be a {C3, C5}-free graph on n vertices with δ(G) > n/4. Then G is homomorphic to C7.
+ We can now finally prove the upper bound in the last case of Theorem 1.3.
+ Theorem 5.7. Let H be a graph such that χ(H) = 3, H contains a critical edge xy, and odd-girth(H) ≥ 5. Then δlin-rem(H) ≤ 1/4.
+ Proof. Denote h = |V(H)| and write odd-girth(H) = 2k + 1 ≥ 5. By the second item of Lemma 5.1, there is a partition V(H) = A1 ·∪ A2 ·∪ · · · ·∪ A2k+1 such that |A1| = |A2| = 1, and E(H) ⊆ ⋃_{i∈[2k+1]} Ai × Ai+1. Denote si = |Ai| for each i ∈ [2k + 1], so H is a subgraph of the blow-up C2k+1[s1, . . . , s2k+1] of C2k+1. Let c1 = c1(C2k+1, s1, . . . , s2k+1) > 0 and c2 = c2(k) > 0 be the constants given by Lemma 2.1 and Lemma 2.3, respectively. According to Theorem 1.2, δpoly-rem(C2k+1) = 1/(2k + 1) < 1/4, and hence there exists a constant c3 = c3(k) > 0 such that if G is a graph on n vertices with δ(G) ≥ n/4 and at least εn^2 edge-disjoint C2k+1-copies, then G contains at least c3ε^{1/c3}n^{2k+1} copies of C2k+1. Set c := c1 · min(c2, c3).
+ Let α > 0 and let ε be small enough; it suffices to assume that ε < (α^2/(200k(k + 2)))^{1/c}. Let G be a graph on n vertices with δ(G) ≥ (1/4 + α)n which contains at least εn^2 edge-disjoint copies of H. Our goal is to show that G contains ΩH,α(εn^h) copies of H. Suppose first that G contains at least ε^c n^2 edge-disjoint copies of C2ℓ+1 for some 1 ≤ ℓ ≤ k. If ℓ < k, then G contains Ωk(ε^{c/c2}n^{2k+1}) = Ωk(ε^{c1}n^{2k+1}) copies of C2k+1 by Lemma 2.3 and the choice of c2. And if ℓ = k, then G contains Ωk(ε^{c/c3}n^{2k+1}) = Ωk(ε^{c1}n^{2k+1}) copies of C2k+1 by Theorem 1.2 and the choice of c3. In either case, G contains Ωk(ε^{c1}n^{2k+1}) copies of C2k+1. But then, by Lemma 2.1 (with V1 = · · · = V2k+1 = V(G)), G contains at least ΩH(ε^{c1/c1}n^h) = ΩH(εn^h) copies of C2k+1[s1, . . . , s2k+1], and hence ΩH,α(εn^h) copies of H. This concludes the proof of this case.
+ From now on, assume that G contains at most ε^c n^2 edge-disjoint C2ℓ+1-copies for every ℓ ∈ [k]. Let Cℓ be a maximal collection of edge-disjoint C2ℓ+1-copies in G, so |Cℓ| ≤ ε^c n^2. Let Ec be the set of edges which are contained in one of the cycles in C1 ∪ · · · ∪ Ck. Let S be the set of vertices which are incident with at least αn/10 edges from Ec. Then
+ |Ec| ≤ Σ_{ℓ=1}^{k} (2ℓ + 1)ε^c n^2 = k(k + 2)ε^c n^2  and  |S| ≤ 2|Ec|/(αn/10) ≤ (20k(k + 2)ε^c/α)n < αn/10,   (1)
+ where the last inequality holds by our assumed bound on ε. Let G′ be the subgraph of G obtained by deleting the edges in Ec and the vertices in S. Note that G′ ⊆ G − Ec is {C3, C5, . . . , C2k+1}-free because for every 1 ≤ ℓ ≤ k, we removed all edges from a maximal collection of edge-disjoint C2ℓ+1-copies.
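+ The last inequality in (1) is exactly equivalent to the assumed upper bound on ε; spelled out:
+ ```latex
+ \frac{20k(k+2)\varepsilon^{c}}{\alpha}\,n < \frac{\alpha n}{10}
+ \;\Longleftrightarrow\;
+ \varepsilon^{c} < \frac{\alpha^{2}}{200k(k+2)}
+ \;\Longleftrightarrow\;
+ \varepsilon < \Bigl(\frac{\alpha^{2}}{200k(k+2)}\Bigr)^{1/c}.
+ ```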
+ Claim 5.8. |V(G′)| > (1 − α/10)n and δ(G′) > (1/4 + 4α/5)n.
+ Proof. The first inequality follows from (1), as |V(G′)| = n − |S|. Each v ∈ V(G)\S has at most αn/10 incident edges from Ec, and at most |S| < αn/10 neighbors in S, so degG′(v) > degG(v) − αn/5 ≥ (1/4 + 4α/5)n. Hence, δ(G′) > (1/4 + 4α/5)n.
+ Claim 5.9. G′ is homomorphic to C7. Moreover, G′ is bipartite unless k = 2.
+ Proof. Recall that G′ is {C3, C5, . . . , C2k+1}-free. As k ≥ 2, G′ is {C3, C5}-free. Also, δ(G′) > n/4 ≥ |V(G′)|/4 by Claim 5.8. So G′ is homomorphic to C7 by Theorem 5.6. If k ≥ 3, i.e. odd-girth(H) ≥ 7, then odd-girth(G′) ≥ 2k + 3 ≥ 9. As δ(G′) > n/4, G′ is bipartite by Lemma 5.5.
+ The rest of the proof is divided into two cases, based on whether or not G′ is bipartite. These cases are handled by Propositions 5.10 and 5.11, respectively.
+ Proposition 5.10. Suppose that G′ is bipartite. Then G has ΩH,α(εn^h) copies of H.
+ Proof. Let (L′, R′) be a bipartition of G′, so V(G) = L′ ·∪ R′ ·∪ S. Let L1 ⊆ S (resp. R1 ⊆ S) be the set of vertices of S having at most αn/5 neighbors in L′ (resp. R′). Let G′′ be the bipartite subgraph of G induced by the bipartition (L′′, R′′) := (L′ ·∪ L1, R′ ·∪ R1). Let S′′ = V(G)\(L′′ ·∪ R′′), so V(G) = L′′ ·∪ R′′ ·∪ S′′.
+ We claim that δ(G′′) ≥ (1/4 + α/2)n. First, as G′ is a subgraph of G′′, we have degG′′(v) > (1/4 + 4α/5)n for each v ∈ V(G′) ⊆ V(G′′) by Claim 5.8. Now we consider vertices in V(G′′) \ V(G′) = L1 ∪ R1. Each v ∈ L1 has at most |S| ≤ αn/10 neighbors in S and at most αn/5 neighbors in L′, by the definition of L1. Hence, v has at least degG(v) − (3α/10)n ≥ (1/4 + α/2)n neighbors in R′ ⊆ V(G′′). By the symmetric argument for vertices v ∈ R1, we get that δ(G′′) ≥ (1/4 + α/2)n, as required.
+ For an edge uv ∈ E(G)\E(G′′), we say that uv is of type I if u, v ∈ L′′ or u, v ∈ R′′, and we say that uv is of type II if u ∈ S′′ or v ∈ S′′. Every edge in E(G)\E(G′′) is of type I or II. Since χ(H) = 3 and G′′ is bipartite, each copy of H in G must contain an edge of type I or an edge of type II (or both). As G has εn^2 edge-disjoint H-copies, G contains at least εn^2/2 edges of type I or at least εn^2/2 edges of type II. We now consider these two cases separately. See Fig. 4 for an illustration. Recall that xy ∈ E(H) denotes a critical edge of H.
+ Case 1: G contains εn^2/2 edges of type I. Fix any edge ab ∈ E(G) of type I. Without loss of generality, assume a, b ∈ L′′ (the case a, b ∈ R′′ is symmetric). We claim that G has poly(α)n^{h−2} copies of H mapping xy ∈ E(H) to ab ∈ E(G). If degG(a, b) ≥ αn/2 then this holds by Lemma 5.2. Otherwise, degG(a, b) < αn/2, and thus
+ |R′′| ≥ |NG′′(a) ∪ NG′′(b)| ≥ degG′′(a) + degG′′(b) − degG(a, b) > 2δ(G′′) − αn/2 > n/2,
+ using that δ(G′′) ≥ (1/4 + α/2)n. Thus, |L′′| < n/2. This implies that for all a′ ∈ NG′′(a), b′ ∈ NG′′(b), degG′′(a′, b′) ≥ 2δ(G′′) − |L′′| ≥ αn. Now, by Lemma 5.3 (with A = NG′′(a) and B = NG′′(b)), there are poly(α)n^{h−2} copies of H mapping xy to ab, as claimed. Summing over all edges ab of type I, we get (εn^2/2) · poly(α)n^{h−2} = poly(α)εn^h copies of H. This completes the proof in Case 1.
+ Figure 4: Proof of Proposition 5.10: Case 1 with degG(a, b) ≥ αn/2 (left), Case 1 with degG(a, b) < αn/2 (middle) and Case 2 (right). The red part is the common neighborhood of a and b (or a′ and b′). [figure not reproduced]
+ Case 2: G contains εn^2/2 edges of type II. Note that the number of edges of type II is trivially at most |S′′|n. Thus, |S′′| ≥ εn/2. Fix some a ∈ S′′. By the definition of L′′, R′′ and S′′, a has at least αn/5 neighbors in L′ ⊆ L′′ and at least αn/5 neighbors in R′ ⊆ R′′. Without loss of generality, assume |L′′| ≤ |R′′|, so |L′′| ≤ n/2. Now fix any b ∈ L′′ adjacent to a; there are at least αn/5 choices for b. We have |NG(a) ∩ R′′| ≥ αn/5 and |NG′′(b)| ≥ δ(G′′) > n/4, and for all a′ ∈ NG(a) ∩ R′′, b′ ∈ NG′′(b) ⊆ R′′ it holds that degG′′(a′, b′) ≥ 2δ(G′′) − |L′′| ≥ αn. Therefore, by Lemma 5.3, G has poly(α)n^{h−2} copies of H mapping xy to ab. Enumerating over all a ∈ S′′ and b ∈ NG(a) ∩ L′′, we again get ΩH,α(εn^h) copies of H in G. This completes the proof of Proposition 5.10.
+ Proposition 5.11. Suppose G′ is non-bipartite but homomorphic to C7. Then G has ΩH,α(εn^h) copies of H.
+ Proof. By Claim 5.9 we must have k = 2, so odd-girth(H) = 5. The proof is similar to that of Proposition 5.10, but instead of a bipartition of G′, we use a partition corresponding to a homomorphism into C7. Let V(G)\S = V(G′) = V′1 ·∪ V′2 ·∪ · · · ·∪ V′7 be a partition of V(G′) such that E(G′) ⊆ ⋃_{i∈[7]} V′i × V′i+1. Here and later, all subscripts are modulo 7. We have V′i ≠ ∅ for all i ∈ [7], because otherwise G′ would be bipartite. For i ∈ [7], let Si be the set of vertices in S having at most 2αn/5 neighbors in V(G′)\(V′i−1 ∪ V′i+1). In case a vertex lies in multiple Si's, we put it arbitrarily in one of them. Set V′′i := V′i ∪ Si. Let G′′ be the 7-partite subgraph of G with parts V′′1, . . . , V′′7 and with all edges of G between V′′i and V′′i+1, i = 1, . . . , 7. By definition, G′ is a subgraph of G′′, and G′′ is homomorphic to C7 via the homomorphism V′′i ↦ i. Put S′′ := V(G)\V(G′′) = S \ ⋃_{i=1}^{7} Si. We now collect the following useful properties.
+ Claim 5.12. The following holds:
+ (i) δ(G′′) ≥ (1/4 + α/2)n.
+ (ii) For every i ∈ [7] and for every u, v ∈ V′′i, or u ∈ V′′i and v ∈ V′′i+2, it holds that degG′′(u, v) ≥ αn.
+ (iii) For every i ∈ [7], every v ∈ V′′i has at least αn neighbors in V′′i−1 and at least αn neighbors in V′′i+1.
+ (iv) For every a ∈ S′′, there are i, j with j − i ≡ 1, 3 (mod 7) and |NG(a) ∩ V′′i|, |NG(a) ∩ V′′j| > 2αn/25.
+ Proof.
+ (i) Let i ∈ [7] and v ∈ V′′i. If v ∈ V(G′), then degG′′(v) ≥ degG′(v) ≥ δ(G′) > (1/4 + α/2)n, using Claim 5.8. Otherwise, v ∈ Si. By definition, v has at most 2αn/5 neighbours in V(G′)\(V′i−1 ∪ V′i+1). Also, v has at most |S| ≤ αn/10 neighbours in S. It follows that v has at least degG(v) − 2αn/5 − αn/10 ≥ (1/4 + α/2)n neighbors in V′′i−1 ∪ V′′i+1. Hence, degG′′(v) > (1/4 + α/2)n.
+ (ii) First, observe that
+ |V′′i| + |V′′i+2| ≥ (1/4 + α/2)n   (2)
+ for all i ∈ [7]. Indeed, V′′i+1 is non-empty, and fixing any v ∈ V′′i+1, we have |V′′i| + |V′′i+2| ≥ degG′′(v) ≥ δ(G′′) ≥ (1/4 + α/2)n. By applying (2) to the pairs (i + 2, i + 4) and (i − 2, i), we get
+ |V′′i−1| + |V′′i+1| + |V′′i+3| ≤ n − (|V′′i+2| + |V′′i+4|) − (|V′′i−2| + |V′′i|) ≤ n − 2(1/4 + α/2)n < n/2.   (3)
+ Now let i ∈ [7]. For u, v ∈ V′′i we have NG′′(u) ∪ NG′′(v) ⊆ V′′i−1 ∪ V′′i+1, and for u ∈ V′′i, v ∈ V′′i+2 we have NG′′(u) ∪ NG′′(v) ⊆ V′′i−1 ∪ V′′i+1 ∪ V′′i+3. In both cases, |NG′′(u) ∪ NG′′(v)| < n/2 by (3). As degG′′(u) + degG′′(v) ≥ 2δ(G′′) ≥ (1/2 + α)n, we have degG′′(u, v) > αn, as required.
+ (iii) We first argue that |V′′i| ≤ (1/4 − 3α/2)n for each i ∈ [7]. Indeed, by applying (2) to the pairs (i − 1, i + 1), (i + 2, i + 4), (i + 3, i + 5), we get
+ |V′′i| ≤ n − (|V′′i−1| + |V′′i+1|) − (|V′′i+2| + |V′′i+4|) − (|V′′i+3| + |V′′i+5|) ≤ n − 3(1/4 + α/2)n = (1/4 − 3α/2)n.
+ Now, for every v ∈ V′′i, we have NG′′(v) ⊆ V′′i−1 ∪ V′′i+1 and |V′′i−1|, |V′′i+1| < (1/4 − 3α/2)n. Hence, v has at least degG′′(v) − (1/4 − 3α/2)n ≥ αn neighbors in each of V′′i−1, V′′i+1.
+ (iv) Let I be the set of i with |NG(a) ∩ V′′i| ≥ 2αn/25. If I is empty, then a has fewer than 5 · 2αn/25 = 2αn/5 neighbors in every V(G′)\(V′i−1 ∪ V′i+1) and therefore cannot be in S′′. Suppose for contradiction that there exist no i, j ∈ I with j − i ≡ 1, 3 (mod 7). We claim that there is j ∈ [7] such that I ⊆ {j, j + 2}. Fix an arbitrary i ∈ I. Then i ± 1, i ± 3 ∉ I by assumption. Also, at most one of i + 2, i − 2 is in I, because (i − 2) − (i + 2) ≡ 3 (mod 7). So I ⊆ {i, i + 2} or I ⊆ {i − 2, i}, proving our claim that I ⊆ {j, j + 2} for some j. By the definition of I, a has at most 5 · 2αn/25 = 2αn/5 neighbors in V(G′)\(V′j ∪ V′j+2). Hence, a ∈ Sj+1. This contradicts the fact that a ∈ S′′, as S′′ ∩ Sj+1 = ∅.
+ We continue with the proof of Proposition 5.11. Recall that the edges in E(G) \ E(G′′) are precisely the edges of G not belonging to ⋃_{i∈[7]} V′′i × V′′i+1. For an edge ab ∈ E(G)\E(G′′), we say that ab is of type I if a, b ∈ V(G′′), and of type II if a ∈ S′′ or b ∈ S′′. Clearly, every edge in E(G)\E(G′′) is either of type I or of type II. Since odd-girth(H) = 5 and C5 is not homomorphic to C7, every H-copy in G must contain some edge of type I or of type II (or both). As G has εn^2 edge-disjoint H-copies, G must have at least εn^2/2 edges of type I or at least εn^2/2 edges of type II. We consider these two cases separately. See Fig. 5 for an illustration. Recall that xy ∈ E(H) denotes a critical edge of H.
+ Case 1: G contains εn^2/2 edges of type I. Fix any edge ab of type I, where a ∈ V′′i and b ∈ V′′j for i, j ∈ [7]. We now show that G has poly(α)n^{h−2} copies of H mapping xy ∈ E(H) to ab. As ab ∉ E(G′′), we have i − j ≡ 0, ±2, ±3 (mod 7). When j − i ≡ 0, ±2 (mod 7), we have degG(a, b) ≥ degG′′(a, b) > αn by Claim 5.12 (ii). Then, by Lemma 5.2, G has poly(α)n^{h−2} copies of H mapping xy to ab, as required. Now suppose that j − i ≡ ±3 (mod 7), say j ≡ i + 3 (mod 7). Denote A := NG(a) ∩ V′′i−1 and B := NG(b) ∩ V′′j+1 = NG(b) ∩ V′′i−3. We have |A|, |B| ≥ αn by Claim 5.12 (iii), and |NG(a′, b′)| > αn for all a′ ∈ A, b′ ∈ B by Claim 5.12 (ii). Now, by Lemma 5.3, G has poly(α)n^{h−2} copies of H mapping xy to ab, proving our claim. Summing over all edges ab of type I, we get (εn^2/2) · poly(α)n^{h−2} = ΩH,α(εn^h) copies of H in G, finishing this case.
+ Case 2: G contains εn^2/2 edges of type II. Notice that the number of edges incident to S′′ is at most |S′′|n, meaning that |S′′| ≥ εn/2. Fix any a ∈ S′′. By Claim 5.12 (iv), there exist i, j ∈ [7] with j − i ≡ 1, 3 (mod 7) and |NG(a) ∩ V′′i|, |NG(a) ∩ V′′j| > 2αn/25. Fix any b ∈ NG(a) ∩ V′′i (there are at least 2αn/25 choices for b). Take A = NG(a) ∩ V′′j and B = NG(b) ∩ V′′i+1. We have that |A| ≥ 2αn/25, and |B| ≥ αn by Claim 5.12 (iii). Further, as j − (i + 1) ≡ 0, 2 (mod 7), Claim 5.12 (ii) implies that |NG(a′, b′)| > αn for all a′ ∈ A, b′ ∈ B. Now, by Lemma 5.3, G has poly(α)n^{h−2} copies of H mapping xy to ab. Summing over all choices of a ∈ S′′ and b ∈ NG(a) ∩ V′′i, we obtain |S′′| · (2αn/25) · poly(α)n^{h−2} = ΩH,α(εn^h) copies of H in G. This completes the proof of Case 2, and hence the proposition.
+ Propositions 5.10 and 5.11 imply the theorem.
+ Figure 5: Proof of Proposition 5.11: Case 1 for j = i + 2 (left), Case 1 for j = i + 3 (middle) and Case 2 for j = i + 3 (right). The red part is the common neighborhood of a and b (or a′ and b′). [figure not reproduced]
+ 6 Concluding remarks and open questions
+ It would be interesting to determine the possible values of δpoly-rem(H) for 3-chromatic graphs H. So far we know that 1/(2k + 1) is a value for each k ≥ 1. Is there a graph H with 1/5 < δpoly-rem(H) < 1/3? Also, is it true that δpoly-rem(H) > 1/5 if H is not homomorphic to C5?
+ Another question is whether the inequality in Theorem 1.4 is always tight, i.e. is it always true that δpoly-rem(H) = δhom(IH)?
+ Finally, we wonder whether the parameters δpoly-rem(H) and δlin-rem(H) are monotone, in the sense that they do not increase when passing to a subgraph of H. We are not aware of a way of proving this without finding δpoly-rem(H), δlin-rem(H).
+ References
+ [1] P. Allen, J. Böttcher, S. Griffiths, Y. Kohayakawa, and R. Morris. The chromatic thresholds of graphs. Advances in Mathematics, 235:261–295, 2013.
+ [2] N. Alon. Testing subgraphs in large graphs. Random Structures & Algorithms, 21(3-4):359–370, 2002.
+ [3] N. Alon and J. H. Spencer. The probabilistic method. John Wiley & Sons, 2016.
+ [4] B. Andrásfai, P. Erdős, and V. T. Sós. On the connection between chromatic number, maximal clique and minimal degree of a graph. Discrete Mathematics, 8(3):205–218, 1974.
+ [5] S. Brandt. On the structure of dense triangle-free graphs. Combinatorics, Probability and Computing, 8(3):237–245, 1999.
+ [6] S. Brandt and S. Thomassé. Dense triangle-free graphs are four-colorable: A solution to the Erdős–Simonovits problem. Preprint, 2011.
+ [7] C.-C. Chen, G. P. Jin, and K. M. Koh. Triangle-free graphs with large degree. Combinatorics, Probability and Computing, 6(4):381–396, 1997.
+ [8] O. Ebsen and M. Schacht. Homomorphism thresholds for odd cycles. Combinatorica, 40(1):39–62, 2020.
+ [9] P. Erdős. On extremal problems of graphs and generalized graphs. Israel Journal of Mathematics, 2(3):183–190, 1964.
+ [10] P. Erdős and M. Simonovits. On a valence problem in extremal graph theory. Discrete Mathematics, 5(4):323–334, 1973.
+ [11] J. Fox. A new proof of the graph removal lemma. Annals of Mathematics, pages 561–579, 2011.
+ [12] J. Fox and Y. Wigderson. Minimum degree and the graph removal lemma. Journal of Graph Theory, 2021.
+ [13] L. Gishboliner, A. Shapira, and Y. Wigderson. An efficient asymmetric removal lemma and its limitations. arXiv preprint arXiv:2301.07693, 2023.
+ [14] W. Goddard and J. Lyle. Dense graphs with small clique number. Journal of Graph Theory, 66(4):319–331, 2011.
+ [15] R. Häggkvist. Odd cycles of specified length in non-bipartite graphs. In North-Holland Mathematics Studies, volume 62, pages 89–99. Elsevier, 1982.
+ [16] G. Jin. Triangle-free four-chromatic graphs. Discrete Mathematics, 145(1-3):151–170, 1995.
+ [17] S. Letzter and R. Snyder. The homomorphism threshold of {C3, C5}-free graphs. Journal of Graph Theory, 90(1):83–106, 2019.
+ [18] T. Łuczak. On the structure of triangle-free graphs of large minimum degree. Combinatorica, 26(4):489–493, 2006.
+ [19] T. Łuczak and S. Thomassé. Coloring dense graphs via VC-dimension. arXiv preprint arXiv:1007.1670, 2010.
+ [20] J. Lyle. On the chromatic number of H-free graphs of large minimum degree. Graphs and Combinatorics, 27(5):741–754, 2011.
+ [21] Y. Nakar and D. Ron. On the testability of graph partition properties. In Approximation, Randomization, and Combinatorial Optimization. Algorithms and Techniques (APPROX/RANDOM 2018). Schloss Dagstuhl-Leibniz-Zentrum fuer Informatik, 2018.
+ [22] H. Oberkampf and M. Schacht. On the structure of dense graphs with bounded clique number. Combinatorics, Probability and Computing, 29(5):641–649, 2020.
+ [23] I. Z. Ruzsa and E. Szemerédi. Triple systems with no six points carrying three triangles. In Combinatorics (Proc. Fifth Hungarian Colloq., Keszthely, 1976), Vol. II, Colloq. Math. Soc. János Bolyai, volume 18, pages 939–945. North-Holland, Amsterdam–New York, 1978.
+ [24] M. Sankar. Homotopy and the homomorphism threshold of odd cycles. arXiv preprint arXiv:2206.07525, 2022.
+ [25] C. Thomassen. On the chromatic number of triangle-free graphs of large minimum degree. Combinatorica, 22(4):591–596, 2002.
+ [26] C. Thomassen. On the chromatic number of pentagon-free graphs of large minimum degree. Combinatorica, 27(2):241–243, 2007.
3dFST4oBgHgl3EQfYzh6/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
49A0T4oBgHgl3EQfNv86/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6503929c15108ce7ea45b4c8accf863438e2f04a9512a8567c8c763233d742b0
+ size 4063277
5dA0T4oBgHgl3EQfNv_S/content/tmp_files/2301.02152v1.pdf.txt ADDED
@@ -0,0 +1,2837 @@
+ L-HYDRA: MULTI-HEAD PHYSICS-INFORMED NEURAL NETWORKS
+ ZONGREN ZOU∗ AND GEORGE EM KARNIADAKIS†
+ ∗Division of Applied Mathematics, Brown University, Providence, RI 02912, USA (zongren_zou@brown.edu).
+ †Corresponding author. Division of Applied Mathematics, Brown University, Providence, RI 02912, USA (george_karniadakis@brown.edu).
+ arXiv:2301.02152v1 [cs.LG] 5 Jan 2023
+ Abstract. We introduce multi-head neural networks (MH-NNs) to physics-informed machine learning; these are neural networks (NNs) with all nonlinear hidden layers acting as a shared body and multiple linear output layers acting as heads. On this basis, we construct multi-head physics-informed neural networks (MH-PINNs) as a potent tool for multi-task learning (MTL), generative modeling, and few-shot learning for diverse problems in scientific machine learning (SciML). MH-PINNs connect multiple functions/tasks via a shared body serving as the basis functions, as well as via a shared distribution for the heads. The former is accomplished by solving multiple tasks with MH-PINNs, with each head corresponding independently to one task, and the latter by employing normalizing flows (NFs) for density estimation and generative modeling. To this end, our method is a two-stage method, and both stages can be tackled with standard deep learning tools of NNs, enabling easy implementation in practice. MH-PINNs can be used for various purposes, such as approximating stochastic processes, solving multiple tasks synergistically, providing informative prior knowledge for downstream few-shot learning tasks such as meta-learning and transfer learning, learning representative basis functions, and uncertainty quantification. We demonstrate the effectiveness of MH-PINNs on five benchmarks, investigating also the possibility of synergistic learning in regression analysis. We name the open-source code "Lernaean Hydra" (L-HYDRA), since this mythical creature possessed many heads for performing important multiple tasks, as in the proposed method.
+ Key words. PINNs, meta-learning, multi-tasking, transfer learning, generative models, normalizing flows, stochastic problems
+ MSC codes. 34F05, 62M45, 65L99, 65M99, 65N99
+ 1. Introduction. Learning across tasks has drawn great attention recently in deep learning and is an emerging theme in scientific machine learning (SciML), due to the fact that several classes of scientific problems are similar and/or related intrinsically by their common physics. Intuitively, if tasks are similar, e.g., in the context of approximating stochastic processes [44], learning solution operators of ordinary/partial differential equations (ODEs/PDEs) [28], and solving parametric PDEs [42, 19, 4], it may be beneficial to relate them in the modeling, algorithm design, and/or solving procedure. In this regard, machine learning solvers, developed rapidly in the past few years, are considerably more flexible and of higher potential compared to traditional numerical solvers. Significant progress has been witnessed in the general area, including meta-learning for solving ODEs/PDEs [30, 27, 34, 6], transfer learning for physics-informed neural networks (PINNs) [3, 7], transfer learning for domain shift in solving PDEs [14], multi-task learning for PINNs [40], and generative methods for solving stochastic differential equations (SDEs) [44, 46, 15]. More recently, operator learning [28, 24], in which a direct operator mapping is learned and subsequently used for other tasks in one-shot format, has attracted a lot of attention.
+ Multi-head neural networks (MH-NNs) fit perfectly different scenarios of learning across tasks. They were originally proposed as members of hard-parameter-sharing neural networks (NNs) for deep multi-task learning (MTL) [5], in which multiple tasks, denoted as Tk, k = 1, ..., M, where M is the number of total tasks, are solved simultaneously. The general goals of using MH-NNs in MTL are diverse: achieving better performance for all tasks, learning good and useful representations for downstream tasks, and/or boosting the learning of main tasks with the help of auxiliary tasks. Moreover, although originally designed for solving multiple tasks, MH-NNs in recent years have also been extensively used for meta-learning. For example, in [41], the connection between MTL and meta-learning was analyzed, and meta-learning algorithms for MH-NNs were discussed; in [25], it was shown that MH-NNs trained in MTL fashion also perform task-specific adaptation in meta-learning; [37] argued that the effectiveness of model-agnostic meta-learning [10], a well-known meta-learning algorithm, may be due to successfully learned good representations rather than learned adaptation, and MH-NNs were used to study the detailed contributions of NNs in fast task adaptations. Overall, it is commonly acknowledged in the literature that, when used to solve previous tasks, MH-NNs are capable of distilling useful shared information and storing it in their bodies and heads.
+ In this paper, we develop MH-NNs for physics-informed machine learning [17], propose multi-head physics-informed neural networks (MH-PINNs), and further investigate their applicability and capabilities for MTL, generative modeling, and meta-learning. A MH-PINN, as shown in Fig. 1, is built upon a conventional MH-NN and consists of two main parts, the body and multiple heads, and each head connects to a specific ODE/PDE task. Many architecture-splitting strategies for MH-NNs are adopted in different application scenarios; e.g., for some computer vision problems, a NN is split such that the body consists of convolutional layers and is followed by fully-connected layers as heads. In this paper, however, we choose the simplest one, i.e., the body consists of all nonlinear layers and the head is the last linear layer, for the following two reasons: (1) the dimensionality of the head is reduced, which enables fast density estimation (see next section); and (2) the body spontaneously provides a set of basis functions.
+ Fig. 1. Schematic view of the structure of multi-head physics-informed neural networks (MH-PINNs) with M different heads, which are built upon conventional multi-head neural networks. The shared layers are often referred to as the body and the task-specific layers as heads. Generally, uk, k = 1, ..., M represent M solutions to M different ODEs/PDEs, formulated in Eq. (2.1), which may differ in source terms fk, boundary/initial condition terms bk, or differential operators Fk. [figure not reproduced]
+ The novelty and major contributions of this work are as follows:
+ 1. We propose a new physics-informed generative method using MH-PINNs for learning stochastic processes from data and physics.
+ 2. We propose a new method for physics-informed few-shot regression problems with uncertainty quantification using MH-PINNs.
+ 3. We study and demonstrate the effectiveness of MTL and synergistic learning with MH-NNs in regression problems.
+ The paper is organized as follows. In Sec. 2, we present the problem formulation, details of MH-PINNs, and the general methodology, including how to use MH-PINNs for MTL, generative modeling, and downstream few-shot physics-informed learning with uncertainty quantification (UQ). In Sec. 3, we discuss existing research closely related to our work and compare it conceptually. In Sec. 4, we test MH-PINNs on five benchmarks, each of which corresponds to one or more learning purposes, e.g., MTL and generative modeling. In Sec. 5, we investigate MTL and synergistic learning with the function approximation example. We conclude and summarize in Sec. 6. The details of our experiments, such as NN architectures and training strategies, can be found in Appendix A and B, as well as in the L-HYDRA open-source codes on GitHub, which will be released once the paper is accepted.
+ 2. Methodology. We assume that we have a family of tasks, {Tk}_{k=1}^{M}, each of which is associated with data Dk, k = 1, ..., M. The primary focus of this paper is on scientific computing and ODEs/PDEs, and therefore we further assume that {Tk}_{k=1}^{M} are physics-informed regression problems [17]. Consider a PDE of the following form:
+ (2.1a) Fk[uk(x)] = fk(x), x ∈ Ωk,
+ (2.1b) Bk[uk(x)] = bk(x), x ∈ ∂Ωk,
+ where k = 1, ..., M denotes the index of the task, x is the general spatial-temporal coordinate of Dx dimensions, Ωk are bounded domains, fk and uk are the Du-dimensional source terms and solutions to the PDE, respectively, Fk are general differential operators, Bk are general boundary/initial condition operators, and bk are boundary/initial condition terms. For simplicity, throughout this paper, the domain and the boundary/initial operator, denoted as Ω and B, are assumed to be the same for all tasks, and the solutions uk to be task-specific. The task Tk is described as approximating uk, and/or fk, and/or Fk, and/or bk, from data Dk and Eq. (2.1).
+ Traditional numerical solvers often tackle {Tk}_{k=1}^{M} independently, without leveraging or transferring knowledge across tasks. The PINN method [38] was designed to solve ODEs/PDEs independently using NNs, which, however, yields M uncorrelated results. In this paper, instead, we treat {Tk}_{k=1}^{M} as a whole and connect them with MH-PINNs, the architecture of which, shown in Fig. 1, enforces basis-function-sharing predictions on the solutions uk. In addition to the informative representation/body, we further relate {Tk}_{k=1}^{M} by assuming that their corresponding heads in MH-PINNs, denoted as {Hk}_{k=1}^{M}, are samples of a random variable with unknown probability density function (PDF), denoted as H and p(H), respectively. The shared body and a generative model of H immediately form a generative model of the solution u, and generators of the source term f and the boundary/initial term b as well, by substituting u into Eq. (2.1) with automatic differentiation [1]; from this, a generative method for approximating stochastic processes is seamlessly developed.
+ Generators of u, f and b, as discussed in [30], are able to provide an informative prior distribution in physics-informed Bayesian inference [43, 26] as well as in UQ for SciML [47, 36], where the informative prior compensates for the insufficiency of observational data to address physics-informed learning problems with even a few noisy measurements. In this paper, we generalize such problems to deterministic cases as well, where the data is noiseless and methods and results are deterministic, and refer to this setting as few-shot physics-informed learning. The general idea is to apply prior knowledge learned from connecting {Tk}_{k=1}^{M} with MH-PINNs to new tasks, denoted as ˜T, associated with insufficient data ˜D, for accurate and trustworthy predictions. The schematic view of the learning framework is illustrated in Fig. 2, and the details are explained next.
+ Fig. 2. Schematic view of the learning framework and the proposed method. Three general types of learning are addressed: physics-informed learning, generative modeling, and few-shot learning. The physics-informed learning is performed with MH-PINNs; the generative modeling is done afterwards by density estimation over the head via normalizing flows (NFs); in the end, the few-shot physics-informed learning is accomplished with prior knowledge obtained from the previous two, via either fine-tuning with the learned regularization or Bayesian inference with the learned prior distribution. The body represents the set of basis functions learned from solving {Tk}_{k=1}^{M} with MH-PINNs, and the density of the head, estimated from its samples using NFs, acts as the regularization, the prior distribution, or the generator together with the body, depending on the usage of MH-PINNs in applications. [figure not reproduced]
+ 2.1. Multi-head physics-informed neural networks (MH-PINNs). Hard parameter sharing is the most commonly used approach when MTL with NNs is considered, and MH-NNs, as its simplest instance, are frequently adopted [5, 39]. A MH-PINN, as described earlier, is composed of a body and multiple heads. We denote by Φ the body and by Hk the head for Tk. Notice that here Φ : R^{Dx} → R^{L} is a function parameterized by a neural network with parameter θ, and Hk ∈ R^{L+1} is a vector, where L is the number of neurons on the last layer of the body. Let us define Hk = [h^0_k, h^1_k, ..., h^L_k]^T and Φ(x) = [φ1(x), ..., φL(x)]^T, where φl : R^{Dx} → R; then the surrogate for the solution in Tk can be rewritten as
+ ˆuk(x) = h^0_k + Σ_{l=1}^{L} h^l_k φl(x), ∀x ∈ Ω.
+ The approximated source terms and boundary/initial terms are derived from Eq. (2.1) accordingly. In the MTL framework, given data {Dk}_{k=1}^{M} and physics Eq. (2.1), the loss function L is formulated as follows:
+ (2.2) L({Dk}_{k=1}^{M}; θ, {Hk}_{k=1}^{M}) = (1/M) Σ_{k=1}^{M} Lk(Dk; θ, Hk),
+ where Lk denotes the common loss function in PINNs. Conventionally, the data for Tk is expressed as Dk = {D^f_k, D^b_k, D^u_k}, where D^f_k = {x^i_k, f^i_k}_{i=1}^{N^f_k}, D^b_k = {x^i_k, b^i_k}_{i=1}^{N^b_k} and D^u_k = {x^i_k, u^i_k}_{i=1}^{N^u_k}, and Lk is defined as follows:
+ (2.3) Lk(Dk; θ, Hk) = (w^f_k/N^f_k) Σ_{i=1}^{N^f_k} ||Fk(ˆuk(x^i_k)) − f^i_k||^2 + (w^b_k/N^b_k) Σ_{i=1}^{N^b_k} ||B(ˆuk(x^i_k)) − b^i_k||^2 + (w^u_k/N^u_k) Σ_{i=1}^{N^u_k} ||ˆuk(x^i_k) − u^i_k||^2 + R(θ, Hk),
+ where || · || represents a properly chosen norm, R(·) is a regularization method over the parameters of NNs, N^f_k, N^b_k, N^u_k are the numbers of data points for fk, bk, uk, and w^f_k, w^b_k, w^u_k are weights to balance the different terms in the loss function.
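+ A minimal PyTorch sketch of this architecture and loss is given below. It is illustrative only: the layer widths, the tanh activation, and the placeholder residual operator F[u] = u_xx are our assumptions for the example, not the paper's exact configurations (those are given in the appendices).
+ ```python
+ import torch
+ import torch.nn as nn
+ 
+ class MHPINN(nn.Module):
+     """Shared nonlinear body Phi: R^{D_x} -> R^L, plus M linear heads H_k."""
+     def __init__(self, dim_x=1, width=50, L=50, num_tasks=10):
+         super().__init__()
+         self.body = nn.Sequential(
+             nn.Linear(dim_x, width), nn.Tanh(),
+             nn.Linear(width, width), nn.Tanh(),
+             nn.Linear(width, L), nn.Tanh(),
+         )
+         # each head is an affine map R^L -> R, i.e. H_k = (h_k^0, ..., h_k^L)
+         self.heads = nn.ModuleList(nn.Linear(L, 1) for _ in range(num_tasks))
+ 
+     def forward(self, x, k):
+         # surrogate u_k(x) = h_k^0 + sum_l h_k^l * phi_l(x)
+         return self.heads[k](self.body(x))
+ 
+ def residual(model, x, k):
+     # placeholder differential operator F[u] = u_xx via automatic differentiation
+     x = x.requires_grad_(True)
+     u = model(x, k)
+     u_x = torch.autograd.grad(u.sum(), x, create_graph=True)[0]
+     return torch.autograd.grad(u_x.sum(), x, create_graph=True)[0]
+ 
+ def mtl_loss(model, tasks):
+     # Eq. (2.2): average of per-task PINN losses; only residual and data-fit
+     # terms, with unit weights, are shown for brevity (cf. Eq. (2.3))
+     total = 0.0
+     for k, t in enumerate(tasks):  # t: dict with keys "xf", "f", "xu", "u"
+         total = total + (residual(model, t["xf"], k) - t["f"]).pow(2).mean()
+         total = total + (model(t["xu"], k) - t["u"]).pow(2).mean()
+     return total / len(tasks)
+ ```
+ After this MTL stage, the head samples {Hk} can be read off as the weight and bias of each `heads[k]` for the density-estimation stage of Sec. 2.2.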
+ 2.2. Generative modeling and normalizing flows (NFs). As mentioned earlier, MH-PINNs connect {Tk}_{k=1}^{M} by making two assumptions: (1) the solutions uk, k = 1, ..., M, share the same set of basis functions, Φ; and (2) the corresponding coefficients are samples of the same random variable, H. In [7], Φ was used as a carrier of prior knowledge from {Tk}_{k=1}^{M} in downstream physics-informed learning tasks. In this work, we extend it by utilizing the information from the head as well, by estimating the PDF and a generator of H from its samples, {Hk}_{k=1}^{M}, using normalizing flows (NFs). The interested readers are directed to [32, 22] for reviews of NFs as well as [9, 33, 20] for developments of some popular NFs.
+ We choose NFs over other commonly used generative models, e.g., generative adversarial networks (GANs) [13], variational auto-encoders (VAEs) [21], or diffusion models [16], because the NF serves as both a density estimator and a generator. The former is able to provide proper regularization in the downstream few-shot physics-informed learning tasks, while the latter leads to a physics-informed generative method for approximating stochastic processes. It is worth noting that in previous works on physics-informed generative methods [44, 46, 15], NNs are trained by measurements over uk, and/or fk, and/or bk. Our model, on the other hand, learns through samples of the head, which are obtained from MTL in the first step. This learning strategy brings two substantial advantages: (1) flexibility in dealing with unstructured data, e.g., inconsistent measurements across tasks; and (2) simplicity and controllability of the training, by decoupling the physics-informed learning and the generative modeling.
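+ As a concrete and deliberately minimal illustration of this second stage, the sketch below fits a small RealNVP-style affine-coupling flow to the collected head samples by maximum likelihood. It assumes an even head dimension for simplicity and is not the specific NF architecture used in the experiments:
+ ```python
+ import torch
+ import torch.nn as nn
+ 
+ class AffineCoupling(nn.Module):
+     """RealNVP-style coupling: z1 = x1, z2 = x2 * exp(s(x1)) + t(x1)."""
+     def __init__(self, dim, hidden=64):
+         super().__init__()
+         assert dim % 2 == 0, "sketch assumes an even head dimension"
+         self.half = dim // 2
+         self.net = nn.Sequential(nn.Linear(self.half, hidden), nn.Tanh(),
+                                  nn.Linear(hidden, dim))  # outputs [s, t]
+ 
+     def forward(self, x):
+         x1, x2 = x.chunk(2, dim=1)
+         s, t = self.net(x1).chunk(2, dim=1)
+         s = torch.tanh(s)  # keep scales well-behaved
+         return torch.cat([x1, x2 * torch.exp(s) + t], dim=1), s.sum(dim=1)
+ 
+     def inverse(self, z):
+         z1, z2 = z.chunk(2, dim=1)
+         s, t = self.net(z1).chunk(2, dim=1)
+         s = torch.tanh(s)
+         return torch.cat([z1, (z2 - t) * torch.exp(-s)], dim=1)
+ 
+ class Flip(nn.Module):
+     """Reverse coordinate order between couplings (volume-preserving)."""
+     def forward(self, x):
+         return x.flip(dims=[1]), x.new_zeros(x.shape[0])
+     def inverse(self, z):
+         return z.flip(dims=[1])
+ 
+ class NormalizingFlow(nn.Module):
+     def __init__(self, dim, num_couplings=6):
+         super().__init__()
+         self.dim = dim
+         layers = []
+         for _ in range(num_couplings):
+             layers += [AffineCoupling(dim), Flip()]
+         self.layers = nn.ModuleList(layers)
+         self.base = torch.distributions.Normal(0.0, 1.0)
+ 
+     def log_prob(self, x):  # density estimator
+         logdet = x.new_zeros(x.shape[0])
+         for layer in self.layers:
+             x, ld = layer(x)
+             logdet = logdet + ld
+         return self.base.log_prob(x).sum(dim=1) + logdet
+ 
+     def sample(self, n):  # generator
+         z = self.base.sample((n, self.dim))
+         for layer in reversed(self.layers):
+             z = layer.inverse(z)
+         return z
+ 
+ # heads: (M, L+1) tensor collected from the trained MH-PINN
+ # flow = NormalizingFlow(dim=heads.shape[1])
+ # opt = torch.optim.Adam(flow.parameters(), lr=1e-3)
+ # for _ in range(5000):
+ #     loss = -flow.log_prob(heads).mean()  # maximum likelihood
+ #     opt.zero_grad(); loss.backward(); opt.step()
+ ```
+ New heads drawn with `flow.sample(n)`, pushed through the shared body, yield samples of u, and, through Eq. (2.1), samples of f and b.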
+ 2.3. Prior knowledge utilized in the downstream tasks. Here, we describe in detail how to utilize the prior knowledge stored in MH-PINNs for a downstream few-shot physics-informed learning task, ˜T, which is defined in the same way as all other tasks in the upstream training, but with much fewer measurements. Training of MH-PINNs and NFs yields a body, Φ, samples of heads, {Hk}_{k=1}^{M}, and an estimated PDF of the head, ˆp(H) ≈ p(H). In solving ˜T with ˜D, we fix the body Φ and find the head ˜H that best explains the data ˜D and the physics in Eq. (2.1). Noiseless and noisy data are considered in this paper: for noiseless data, regular NN training is performed on the head for new tasks to provide deterministic predictions, where the learned PDF of the head, ˆp(H), acts as a regularization term in the loss function; for noisy data, Bayesian inference is performed on the head, in which ˆp(H) serves as the prior distribution. Details are presented in the following.
+ 2.3.1. Regularization in optimization. Limited data in few-shot learning often leads to over-fitting and/or poor inter-/extrapolation performance. In this regard, regularizing the head according to its PDF is able to prevent over-fitting and provide additional prior knowledge for better inter-/extrapolation performance. The optimization problem is cast as
+ (2.4) ˜H∗ = arg min_{˜H} L∗(˜D; ˜H), where L∗(˜D; ˜H) = L(˜D; ˜H) − α log p(˜H) ≈ L(˜D; ˜H) − α log ˆp(˜H),
+ where L is the regular loss function in physics-informed learning for data ˜D and parameter ˜H, and α ≥ 0 is a coefficient adjusting the regularization effect. Problem (2.4) in this work is solved with gradient descent.
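+ A sketch of this fine-tuning step is shown below, reusing the `body` and `flow` objects from the previous stages; only a data-fit term is shown (a PDE residual term enters identically), and all names and hyperparameters are illustrative:
+ ```python
+ import torch
+ 
+ def finetune_head(body, flow, x_u, u, dim, alpha=0.1, steps=2000):
+     """Solve (2.4) by gradient descent on the head only; body and flow frozen."""
+     h = torch.zeros(1, dim, requires_grad=True)       # head \tilde{H}, dim = L + 1
+     opt = torch.optim.Adam([h], lr=1e-2)
+     phi = body(x_u).detach()                          # frozen basis values, (N, L)
+     for _ in range(steps):
+         pred = phi @ h[0, 1:].unsqueeze(1) + h[0, 0]  # h^0 + sum_l h^l phi_l
+         data_fit = (pred.squeeze(1) - u).pow(2).mean()
+         loss = data_fit - alpha * flow.log_prob(h).mean()  # learned regularizer
+         opt.zero_grad(); loss.backward(); opt.step()
+     return h.detach()
+ ```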
+ 2.3.2. Prior distribution in Bayesian inference. As opposed to the point estimate obtained by solving the optimization problem (2.4), Bayesian inference yields the posterior distribution of the head in ˜T. Similarly to [30, 47], the posterior distribution of ˜H is established as follows:
+ (2.5) p(˜H|˜D) ∝ p(˜D|˜H)p(˜H) ≈ p(˜D|˜H)ˆp(˜H),
+ where p(˜H|˜D) is the posterior distribution, p(˜D|˜H) is the likelihood distribution, which is often assumed to be independent Gaussian over all measurements in ˜D, and ˆp is the estimated PDF of the head via NFs. The intractability of distribution (2.5) requires approximation methods, among which Markov chain Monte Carlo methods, such as Hamiltonian Monte Carlo (HMC) [31], generally provide the most accurate estimation. Moreover, the relatively low dimension of ˜H also enables the use of Laplace's approximation (LA) [18], which is employed in this paper as an alternative to HMC.
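+ Laplace's approximation admits a particularly compact implementation; the following sketch, with a user-supplied negative log-posterior built from the Gaussian likelihood and the flow's log-density, is one possible realization rather than the paper's code:
+ ```python
+ import torch
+ from torch.autograd.functional import hessian
+ 
+ def laplace_approximation(neg_log_post, h_map):
+     """Gaussian posterior centered at the MAP head, with inverse-Hessian covariance.
+ 
+     neg_log_post: callable mapping a head vector (dim,) to a scalar, e.g.
+         0.5 / sigma**2 * ((predict(h) - data)**2).sum() - flow.log_prob(h[None]).squeeze()
+     h_map: MAP estimate of the head, e.g. obtained by solving (2.4).
+     """
+     H = hessian(neg_log_post, h_map)   # (dim, dim) Hessian at the MAP
+     H = 0.5 * (H + H.T)                # symmetrize for numerical safety
+     cov = torch.linalg.inv(H + 1e-6 * torch.eye(len(h_map)))  # jitter for stability
+     return torch.distributions.MultivariateNormal(h_map, covariance_matrix=cov)
+ ```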
3. Related works. Deep NNs in recent years have been extensively investigated for the solution of ODEs/PDEs and SDEs, as well as for operator learning. Although not explicitly introduced as MH-PINNs, MH-NNs were first used to solve ODEs/PDEs in [7], in which MH-PINNs were pre-trained on multiple similar tasks, and then the heads were discarded while the body was kept and transferred to solving new tasks, either by least-squares estimation for linear ODEs/PDEs or by fine-tuning with gradient descent for nonlinear ones. In [7], a one-shot transfer learning algorithm for linear problems was proposed, but other potential uses of MH-NNs, e.g., MTL and generative modeling, were not discussed, as opposed to the work presented herein. Furthermore, [7] focused only on fast and deterministic predictions with high accuracy using sufficient clean data, while in this paper we study the applicability of MH-NNs to few-shot physics-informed learning as well, where data is insufficient and/or noisy, and address such cases with UQ. We note that an MH-NN was also used as a multi-output NN in [45], which, however, focused on solving single tasks and obtaining uncertainties.

Generative modeling in the context of scientific computing has also been studied recently, and a few attempts to adopt deep generative NNs for SciML problems have been made in [44, 46, 15], most of which have focused on approximating stochastic processes and on solving SDEs. We propose a new physics-informed generative method, as an alternative to the current ones, using MH-PINNs, and test it in approximating stochastic processes. In this regard, our method is functionally the same as the current ones, but technically different. All previous methods address physics-informed generative modeling with end-to-end learning strategies, coupling two dissimilar types of learning, physics-informed learning and generative modeling, which may be problematic to implement and use in practice when either type of learning becomes more complicated. Our method, on the other hand, approaches the problem from an entirely new angle by decoupling the two: physics-informed learning is performed first and is followed by learning the generators. Our method is thus a two-step method, and with the help of well-developed algorithms from both fields, it has advantages in both flexibility and simplicity of implementation.
4. Results. In this section, we test our method on five benchmarks. The first is a pedagogical function regression, in which we aim to demonstrate the basic applicability and capabilities of our method, showing the importance of incorporating the distribution of the head in the downstream tasks and in obtaining results with or without uncertainty. The second example is a nonlinear ODE system, in which we test our method in approximating stochastic processes through a differential operator, compare different NFs, and eventually compare our method with another well-known physics-informed generative model, physics-informed GANs (PI-GANs) [44], in generative modeling. The third is a 1-D nonlinear reaction-diffusion equation, the fourth is a 2-D nonlinear Allen-Cahn equation, and the fifth is the 2-D stochastic Helmholtz equation with 20 dimensions. In all examples, unless stated otherwise, the data for {D_k}_{k=1}^M are noise-free and sufficient for each task, while ˜D in the downstream tasks is insufficient, which makes the downstream tasks few-shot. In addition, except for the first example, results from Bayesian inference are obtained by employing HMC, and the predicted mean, denoted by µ, and the predicted standard deviation, denoted by σ, are computed from the posterior samples of the functions or unknown parameters. The predicted uncertainty is defined as 2σ in this paper.
4.1. Function approximation. We start with a function regression problem using only data and no physics, which is a degenerate instance of Eq. (2.1), with F_k fixed as the identity operator, without B and b_k, and with u_k = f_k being task-specific. In this case, D_k and ˜D are given as {(x^i_k, f^i_k)}_{i=1}^{N_k} and {(x^i, f^i)}_{i=1}^{N}, respectively, and T_k and ˜T are defined as approximating the functions f_k and ˜f from D_k and ˜D, respectively. The stochastic function f in this example is defined as follows:

(4.1)    f(x) = A cos(ωx) + 2βx, x ∈ [−1, 1], A ∼ U[1, 3), ω ∼ U[2π, 4π), P(β = ±1) = 0.5,

where U stands for the uniform distribution and P(Ξ) denotes the probability of the event Ξ. Our goal is to approximate f from data with MH-NNs and NFs, and to solve the downstream few-shot regression tasks ˜T as well, in which two functions, 2 cos(2πx) + 2x and 2 cos(4πx) − 2x, are regressed from 4 and 5 measurements equidistantly distributed on [−0.9, −0.1], respectively.
For the training of MH-NNs and NFs, 1,000 samples of f subject to Eq. (4.1) are drawn, each of which forms a regression task with 40 measurements sampled equidistantly on [−1, 1] as data. Samples of f for training are displayed in Fig. 3(a). Both noiseless and noisy data cases are considered in the few-shot regression tasks. As described in Sec. 2.3, the former is solved by fine-tuning the head using gradient descent, while the latter is solved by estimating the posterior distribution (Eq. (2.5)) using HMC and LA. The noise ε is assumed to be independent additive Gaussian noise with scale 0.2, i.e., ε ∼ N(0, 0.2²). In the downstream few-shot regression tasks, we compare our method with two other approaches: the transfer learning (TL) method from [7], which transfers only the body, Φ, and the regular NN method, in which no prior knowledge is employed and all parameters of the NN are trained.
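For concreteness, a minimal sketch of generating these training tasks (the names are illustrative):

    import numpy as np

    # Each draw of (A, omega, beta) defines one regression task from Eq. (4.1);
    # 40 equidistant measurements per task, as described above.
    rng = np.random.default_rng(0)
    x = np.linspace(-1.0, 1.0, 40)

    def sample_f(rng):
        A = rng.uniform(1.0, 3.0)
        omega = rng.uniform(2 * np.pi, 4 * np.pi)
        beta = rng.choice([-1.0, 1.0])
        return A * np.cos(omega * x) + 2 * beta * x

    tasks = np.stack([sample_f(rng) for _ in range(1000)])   # (1000, 40) training data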
Results for approximating f and solving the downstream tasks are presented in Fig. 3.

Fig. 3. Results for approximating the stochastic function defined in Eq. (4.1) and solving the downstream few-shot regression tasks. (a) Left: 1,000 samples generated from the exact distribution; middle: 1,000 samples generated from the learned generator; right: statistics computed from samples, in which we refer to the interval of mean ± 2 standard deviations as the bound. (b)/(c) Results for the downstream tasks. Left: results for the noiseless cases using our method, the transfer learning (TL) method of [7], and the regular NN method; middle: results for the noisy case using our method with HMC for posterior estimation; right: results for the same noisy case using our method with LA for posterior estimation.

As shown in Fig. 3(a), our method approximates the stochastic function f well, demonstrating the capability of MH-NNs in generative modeling. In solving the downstream tasks with noiseless data, L2 regularization is imposed in the TL method and the regular NN method to prevent over-fitting when only 4 or 5 measurements are available. As we can see from Figs. 3(b) and (c), our approach yields accurate predictions and performs significantly better than the other two in both tasks, particularly in the region where there are no measurements. By comparing our approach with the NN method, we can see that prior knowledge of f is learned from {T_k}_{k=1}^M and transferred successfully to the new downstream tasks. By comparing our approach with the TL method, we can see that the prior knowledge is stored in both the body and (the distribution of) the head. For the noisy cases, it is shown that, for both tasks and both posterior estimation methods, the predictions are accurate and trustworthy: the predicted means agree with the references, and the errors are bounded by the predicted uncertainties. It is worth noting that the predicted uncertainties do not grow in the interval [0, 1] and show periodic patterns, even though there are no measurements there. That is because an informative prior, learned by the MH-NNs and NFs, is imposed on the head in Bayesian inference.

The target functions in the downstream tasks considered previously are chosen to be in-distribution. They are regressed well with insufficient data, mainly because they
belong to the space of functions on which the generator is trained. However, when the functions in the downstream tasks are out-of-distribution (OOD), our approach fails to produce good predictions, even if the data is sufficient, as shown in Fig. 4. Here, the target function is chosen to be 2 cos(4.5πx) + x, with both ω and β being OOD. Fluctuations are predicted, but they do not match the reference. In Fig. 4, we can further see that when data is sufficient, an NN trained from scratch significantly outperforms our approach, showing that, for OOD functions, the more we rely on the learned regularization, as controlled by the value of α in Eq. (2.4), the more erroneous the prediction is.

Fig. 4. Results for regression on an out-of-distribution function. Left: few-shot regression with clean data using our approach; middle: few-shot regression with noisy data using our approach with HMC for posterior estimation; right: regression with sufficient clean data using the regular NN method and our approach with different regularization strengths α in Eq. (2.4).
4.2. Nonlinear ODE system. In this example, we consider the following ODE system [28], which describes the motion of a pendulum with an external force:

(4.2)    du₁/dt = u₂,    du₂/dt = −λ sin(u₁) + f(t),

with initial conditions u₁(0) = u₂(0) = 0. In Eq. (4.2), f is the external force and λ is a constant. Here, to demonstrate and study the capability of our method in generative modeling, we first consider the case where f is a Gaussian process and λ = 1 is known, which is referred to as the forward problem. Unlike previous studies [44, 46, 15], in which a stochastic process is approximated directly by the output of NNs, in this example we place the differential operator right after the NNs and approximate the stochastic process f as the source term in Eq. (2.1). We also test our method on the inverse problem, where the values of λ in Eq. (4.2) are unknown in {T_k}_{k=1}^M and ˜T. The forward problem corresponds to Eq. (2.1) with F_k, b_k being the same for all tasks and u_k, f_k being task-specific, while the inverse problem corresponds to Eq. (2.1) with b_k being the same and u_k, f_k, and the differential operator F_k being different as a consequence of the task-specific λ.
4.2.1. Forward problem. We first assume that λ = 1 in Eq. (4.2) is known and that data on f are available, i.e., D_k = {(x^i_k, f^i_k)}_{i=1}^{N_k}, k = 1, ..., M. As described before, we employ MH-PINNs to solve {T_k}_{k=1}^M all at once and then employ NFs to learn the distribution of the head. Consequently, we obtain generators of f and u. In this case, f is assumed to be a Gaussian process with a squared exponential kernel:

(4.3)    f(t) ∼ GP(0, K), t ∈ [0, 1], K(x, x′) = exp(−|x − x′|²/(2l²)),

where the correlation length l is set to 0.1, 0.25, or 0.5. As discussed in Sec. 2.2, many types of NFs have been developed in the past decade for generative modeling and
density estimation. MH-PINNs, when used as generators, are compatible with all NFs. In this regard, we compare three popular NFs, RealNVP [9], MAF [33] and IAF [20], and eventually compare MH-PINNs (with NFs) against PI-GANs [44] in approximating the Gaussian process defined in Eq. (4.3) with different correlation lengths.
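A minimal sketch of drawing such GP samples on the 65-point training grid described below, via a Cholesky factorization of the kernel matrix (the jitter term is a standard numerical stabilizer):

    import numpy as np

    def sample_gp(n_samples, n_points=65, l=0.1, jitter=1e-8, seed=0):
        rng = np.random.default_rng(seed)
        t = np.linspace(0.0, 1.0, n_points)
        K = np.exp(-(t[:, None] - t[None, :])**2 / (2 * l**2))   # squared exponential kernel
        L = np.linalg.cholesky(K + jitter * np.eye(n_points))
        return t, (L @ rng.standard_normal((n_points, n_samples))).T   # (n_samples, n_points)

    t, fs = sample_gp(2000, l=0.1)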
For the training of MH-PINNs and NFs, as well as PI-GANs, 2,000 samples of f are drawn according to Eq. (4.3), each of which forms a physics-informed regression task with 65 measurements of f equidistantly sampled on [0, 1] as data. Notice that the ODE system in Eq. (4.2) can be rewritten in a simpler form as follows:

(4.4)    u_tt = −λ sin(u) + f(t), t ∈ [0, 1],

with initial conditions u(0) = u_t(0) = 0. Hence, we choose to use Eq. (4.4) to build the loss function for physics-informed learning.
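As an illustration, a sketch of the corresponding single-task residual loss using automatic differentiation; here net is a placeholder for one body-head pair, and multiplying by t² is one common way to hard-encode the zero initial conditions (an assumption for this sketch, not necessarily the L-HYDRA implementation):

    import torch

    def pendulum_residual_loss(net, t, f_data, lam=1.0):
        t = t.clone().requires_grad_(True)
        u = t**2 * net(t)                               # hard-encodes u(0) = u_t(0) = 0
        u_t = torch.autograd.grad(u.sum(), t, create_graph=True)[0]
        u_tt = torch.autograd.grad(u_t.sum(), t, create_graph=True)[0]
        residual = u_tt + lam * torch.sin(u) - f_data   # Eq. (4.4) rearranged to zero
        return (residual**2).mean()

    # Example usage with a small placeholder network and stand-in source data:
    net = torch.nn.Sequential(torch.nn.Linear(1, 50), torch.nn.Tanh(), torch.nn.Linear(50, 1))
    t = torch.linspace(0.0, 1.0, 65).reshape(-1, 1)
    loss = pendulum_residual_loss(net, t, torch.zeros_like(t))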
Results of the comparisons are shown in Fig. 5 and Table 1. From the spectral analysis of the approximated Gaussian processes shown in Fig. 5, we can see that MH-PINNs with MAF and IAF are comparable with PI-GANs, while MH-PINNs with RealNVP fall marginally behind. As shown in Table 1, the computational costs of MH-PINNs with MAF and RealNVP are significantly lower than those of PI-GANs, while MH-PINNs with IAF are more expensive than PI-GANs. We note that in this example we also record the computational cost of sampling with the different generators. As shown in Table 1, PI-GANs are significantly faster in generating samples. That is because, generally, GANs require relatively shallow NNs, as opposed to NFs, for which a deep architecture is needed to maintain expressivity. Among the three NFs, IAF is the fastest in sampling while MAF is the slowest, the reverse of their ranking in training, which is consistent with the properties of those two NFs: MAF is slow in the forward pass, which is used to generate samples, and fast in the inverse pass, which is used to compute the density, while IAF is the opposite. Although MAF is slow in sampling, considering its fast training and good performance, we equip MH-PINNs with MAF as the density estimator and generator in all other examples in this paper.
Fig. 5. Approximating Gaussian processes as the source term in Eq. (4.2) using different models: spectra of the correlation structure for the learned generators, for different correlation lengths, l. The covariance matrix is constructed using 10,000 generated samples, and eigenvalues are averaged over 10 generators trained independently.
4.2.2. Inverse problem. Next, we assume that λ in Eq. (4.2) is unknown and that some measurements of u are available, in addition to those of f, i.e., D_k = {{(x^i_k, f^i_k)}_{i=1}^{N^f_k}, {(x^i_k, u^i_k)}_{i=1}^{N^u_k}}. MH-PINNs are first employed to infer u_k as well as λ_k from the data D_k and the physics, and NFs are employed afterwards to learn from the samples of H_k and λ_k. To this end, the generative model is for the joint distribution of u, f and λ. Here, we assume f follows a truncated Karhunen–Loève (KL) expansion, with 5 leading terms, of the Gaussian process with squared exponential kernel and correlation length 0.1, and for each task T_k, λ_k = (1/2) exp(∫_{[0,1]} f_k²(t) dt).
            MAF              IAF              RealNVP          PI-GAN
Phase 1     134s             134s             134s             N/A
Phase 2     252s             3939s            245s             N/A
Total       386s             4073s            379s             3243s
Sampling    1.98 × 10^-1 s   1.48 × 10^-2 s   1.50 × 10^-2 s   2.29 × 10^-3 s

Table 1
Computational time for different models to approximate the Gaussian process with correlation length l = 0.1. The MH-PINN method is a two-step method and hence its computation is decomposed into two parts: training the MH-PINNs, referred to as phase 1, and training the NFs, referred to as phase 2. Sampling time is defined as the average time needed to generate 10,000 samples of u.
As for the downstream task ˜T, the target is to infer u and λ from insufficient data of u and f.

For the training of MH-PINNs and NFs, 2,000 samples of f are generated and displayed in Fig. 6(a). For each task, we assume that 33 measurements of f_k and 9 measurements of u_k, equidistantly distributed on [0, 1], are available, and the initial conditions are hard-encoded in the NN modeling. For the downstream task, we assume that 1 random measurement of u and 8 random measurements of f are available, with hard-encoded initial conditions as well. For the case with noisy measurements, we assume the noises ε_f and ε_u to be additive Gaussian, with noise scale 0.05 for the measurements of f and 0.005 for the measurements of u, respectively, i.e., ε_f ∼ N(0, 0.05²) and ε_u ∼ N(0, 0.005²). The reference solution as well as the clean data of u_k are generated by solving Eq. (4.2) for each task T_k, with the corresponding f_k and λ_k, using MATLAB's ode45.
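A small sketch of generating these task-specific constants from the sampled sources (the truncated-KL sampling of f_k itself is omitted; t and f_k denote the measurement grid and one sample):

    import numpy as np

    t = np.linspace(0.0, 1.0, 33)

    def lambda_of(f_k):
        # lambda_k = (1/2) exp( integral of f_k^2 over [0, 1] ), per the definition above
        return 0.5 * np.exp(np.trapz(f_k**2, t))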
Results are shown in Fig. 6 and Table 2, from which we can see that our method approximates the stochastic process as a source term well and produces accurate and trustworthy predictions for u, f, and also λ in the downstream task with limited data, in both the noiseless and noisy cases. As shown, the PINN method yields unacceptable estimates of both u and λ due to the lack of data, while our approach attains much higher accuracy by integrating prior knowledge from {T_k}_{k=1}^M with MH-PINNs.
            PINN     MH-PINN
λ           0.8440   2.5428
Error (%)   63.99    1.21

Table 2
Estimates of λ and L2 relative errors of u for the downstream inverse problem on Eq. (4.2) with clean data, using our approach and the regular PINN method. The reference value of λ is 2.3609.
Fig. 6. Results for the inverse problem of the ODE system (4.2), with initial conditions hard-encoded in NN modeling. (a) Left: 1,000 samples of f, generated from the exact distribution; middle: 1,000 samples of f, generated from the learned generator; right: statistics computed from samples. The bound is defined as in the caption of Fig. 3. (b) Predicted f and u using PINNs and our approach, for the downstream inverse problem with noiseless data. (c) Predicted f, u and λ with uncertainties using our approach, for the downstream inverse problem with noisy data. The predicted mean and standard deviation of λ are 2.4663 and 0.1501, while the reference value is 2.3609.

4.3. 1-D nonlinear reaction-diffusion equation. We now test our method on a 1-D nonlinear time-dependent reaction-diffusion equation, commonly referred to as Fisher's equation [2]:

(4.5)    u_t = D u_xx + k u(1 − u), t ∈ [0, 1], x ∈ [−1, 1],
(4.6)    u(t, −1) = u(t, 1) = 0, t ∈ [0, 1],
(4.7)    u(0, x) = u₀(x), x ∈ [−1, 1],

where D = 0.1, k = 0.1, and u₀(x) is the initial condition function. In this example, we assume that the initial condition function is a stochastic process with the following distribution:

(4.8)    u₀(x) = ((x² − 1)/5) Σ_{j=1}^{5} ξ_j (cos²(jx) − 1), x ∈ [−1, 1],

where ξ_j, j = 1, ..., 5, are independent and identically distributed (i.i.d.) random variables subject to the uniform distribution on [0, 1), i.e., ξ_j ∼ U[0, 1).
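A minimal sketch of sampling these initial conditions on the 41-point grid used below:

    import numpy as np

    rng = np.random.default_rng(0)
    x = np.linspace(-1.0, 1.0, 41)

    def sample_u0(rng):
        xi = rng.uniform(0.0, 1.0, size=5)
        j = np.arange(1, 6)[:, None]                   # mode indices, shape (5, 1)
        modes = np.cos(j * x)**2 - 1                   # (5, 41)
        return (x**2 - 1) / 5 * (xi[:, None] * modes).sum(axis=0)

    u0_samples = np.stack([sample_u0(rng) for _ in range(2000)])   # (2000, 41)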
Unlike the previous examples, the stochasticity comes from the initial condition rather than the source term. This example corresponds to Eq. (2.1) with F_k and f_k being the same for all tasks, and u_k and b_k being task-specific. In addition to measurements of u₀, points on which the PDE residuals are computed are also required in both {T_k}_{k=1}^M and ˜T. Hence, the data is D_k = {{((t^i_k, x^i_k), 0)}_{i=1}^{N^f_k}, {((0, x^i_k), b^i_k)}_{i=1}^{N^b_k}}.
For the training, 2,000 samples of u₀(x) are generated, displayed in Fig. 7(a), and each sample forms a physics-informed regression task with 41 measurements of u₀, equidistantly sampled on [−1, 1], as data for the initial condition. Besides, for all tasks, a uniform 21 × 41 mesh on the temporal-spatial domain [0, 1] × [−1, 1] is used to compute the PDE residual loss. For the downstream task ˜T, 5 random measurements of u₀ are available and the same uniform mesh is applied. The boundary conditions are hard-encoded in the NN modeling in both {T_k}_{k=1}^M and ˜T. For the noisy case, the
noise ε is assumed to be independent additive Gaussian noise with noise scale 0.02 for both the measurements of u₀ and the PDE residual, i.e., ε ∼ N(0, 0.02²). Results are presented in Fig. 7 and Table 3. We can see that our method estimates a good generator of the stochastic process from data and physics, which provides informative prior knowledge in the downstream few-shot physics-informed regression tasks. The prediction is accurate in both the noiseless and noisy cases, and the errors in the noisy case are bounded by the predicted uncertainty. The L2 error of u, shown in Table 3, indicates that our approach outperforms the PINN method by a significant amount, hence demonstrating the effectiveness of bringing prior knowledge into solving similar tasks.
Fig. 7. Generator learning and few-shot physics-informed learning on the 1-D time-dependent reaction-diffusion equation (4.5), with boundary conditions hard-encoded in NN modeling. (a) Left: 1,000 training samples of u₀; middle: 1,000 samples of u(0, ·) from the learned generator; right: statistics computed from samples. The bound is defined as in the caption of Fig. 3. (b) Predicted u at t = 0, 0.5, 1 using our approach and the PINN method with noiseless measurements. (c) Predicted mean and uncertainty of u at t = 0, 0.5, 1 using our approach with HMC for posterior estimation, with noisy measurements.
            PINN    MH-PINN
Error (%)   78.77   0.22

Table 3
L2 relative error of u for the downstream few-shot physics-informed learning task on Eq. (4.5) with clean data of u₀, using our approach and the PINN method.
4.4. 2-D nonlinear Allen-Cahn equation. We now move to a 2-D steady nonlinear Allen-Cahn equation with Dirichlet boundary conditions [43]:

(4.9)     λ∆u + u(u² − 1) = f, x, y ∈ [0, 1],
(4.10)    u(x, 0) = u(x, 1) = u(0, y) = u(1, y) = 0,

where λ = 0.1 is a constant and f is the source term. Here, we impose a distribution on f, which is derived from Eq. (4.9) and the following distribution of the solution u:

(4.11)    u(x, y) = (1/5) Σ_{j=1}^{5} ξ_j sin(jπx) sin(jπy)/(j²π²), x, y ∈ [0, 1],

with i.i.d. random variables ξ_j, j = 1, ..., 5, subject to the uniform distribution, i.e., ξ_j ∼ U[0, 1). In this example, we wish to use our method to learn generators of both u and f from data of f and the physics in Eq. (4.9), and to use them to solve the downstream task ˜T with insufficient data ˜D. This example corresponds to Eq. (2.1) with F_k, b_k being the same among tasks and f_k, u_k being task-specific, and the data is D_k = {((x^i_k, y^i_k), f^i_k)}_{i=1}^{N^f_k}.
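These training pairs can be generated in closed form, since each Fourier mode of Eq. (4.11) has the analytic Laplacian ∆[sin(jπx) sin(jπy)] = −2j²π² sin(jπx) sin(jπy); a sketch:

    import numpy as np

    rng = np.random.default_rng(0)
    lam = 0.1
    xs = np.linspace(0.0, 1.0, 51)
    X, Y = np.meshgrid(xs, xs, indexing="ij")

    def sample_pair(rng):
        xi = rng.uniform(0.0, 1.0, size=5)
        u = np.zeros_like(X)
        lap_u = np.zeros_like(X)
        for j in range(1, 6):
            mode = np.sin(j * np.pi * X) * np.sin(j * np.pi * Y)
            u += xi[j - 1] * mode / (j**2 * np.pi**2) / 5
            lap_u += xi[j - 1] * (-2.0) * mode / 5     # analytic Laplacian of each term
        f = lam * lap_u + u * (u**2 - 1)               # Eq. (4.9)
        return u, f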
To train the MH-PINNs and NFs, we sample 2,000 f from its distribution, each of which is resolved on a 51 × 51 uniform mesh on the 2-D spatial domain [0, 1] × [0, 1]. As for the downstream task, 100 random measurements of f on the uniform mesh are assumed to be available. The noise is assumed to be independent additive Gaussian noise with noise scale 0.05. In both T_k and ˜T, the boundary conditions are hard-encoded in the NN modeling. Results, as well as the locations of the measurements, are presented in Fig. 8 and Table 4. As in all previous examples, our approach delivers accurate and trustworthy predictions, showing that prior knowledge is learned and transferred well in both deterministic and Bayesian inference.
Fig. 8. Results for few-shot physics-informed learning on the 2-D nonlinear Allen-Cahn equation (4.9) with noisy measurements of f. Predicted mean µ and standard deviation σ are computed over 1,000 posterior samples from HMC. The absolute error is defined as the absolute value of the difference between the reference and µ. (a)/(b) From left to right: reference, predicted mean, absolute error, and predicted uncertainty of f/u. Black crosses represent the locations of the measurements of f.
            PINN    MH-PINN
Error (%)   12.82   0.30

Table 4
L2 relative error of u for the downstream few-shot physics-informed learning task on Eq. (4.9) with clean data of f, using our approach and the PINN method.
4.5. 2-D stochastic Helmholtz equation. The last example we test in this paper is the 2-D Helmholtz equation with a stochastic source term and Dirichlet boundary conditions [35]:

(4.12)    (λ² − ∇²)u = f, x, y ∈ [0, 2π],
(4.13)    u(x, 0) = u(x, 2π) = u(0, y) = u(2π, y) = 0,

where λ² is the Helmholtz constant and f is defined as follows:

(4.14)    f(x, y) = (2/d) Σ_{i=1}^{d/4} [ξ_i sin(ix) + ξ_{i+d/4} cos(ix) + ξ_{i+d/2} sin(iy) + ξ_{i+3d/4} cos(iy)],

where ξ_j, j = 1, ..., d, are i.i.d. random variables subject to the uniform distribution U[0, 1), and d represents the dimension of the randomness. For demonstration purposes, we consider the case of d = 20 in this paper, unlike the d = 100 case in [35].
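A sketch of sampling this source on the 50 × 50 training grid; the four subscript blocks of Eq. (4.14) are read as i + d/4, i + d/2, i + 3d/4 so that exactly d random variables appear (our reading of the definition above):

    import numpy as np

    rng = np.random.default_rng(0)
    d = 20
    xs = np.linspace(0.0, 2 * np.pi, 50)
    X, Y = np.meshgrid(xs, xs, indexing="ij")

    def sample_source(rng):
        xi = rng.uniform(0.0, 1.0, size=d)
        f = np.zeros_like(X)
        for i in range(1, d // 4 + 1):
            f += (xi[i - 1] * np.sin(i * X) + xi[i - 1 + d // 4] * np.cos(i * X)
                  + xi[i - 1 + d // 2] * np.sin(i * Y) + xi[i - 1 + 3 * d // 4] * np.cos(i * Y))
        return 2.0 / d * f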
The first case we study is the forward problem with λ² = 1 known. This setup corresponds to Eq. (2.1) with F_k, b_k shared among tasks and u_k, f_k being task-specific. Next, we study the inverse problem with unknown λ, where data on u and f are available, which corresponds to Eq. (2.1) with only b_k being the same and u_k, f_k and the operator F_k being task-specific. The downstream tasks are defined the same as {T_k}_{k=1}^M in both cases, but with fewer measurements.
For both the forward and inverse problems, 10,000 f are sampled from the distribution above, and hence 10,000 tasks are solved with MH-PINNs, with the boundary conditions hard-encoded in the NN modeling. We display samples of a slice of f in Fig. 9(a). For the forward problem, D_k contains only measurements of the source term f_k, i.e., D_k = {((x^i_k, y^i_k), f^i_k)}_{i=1}^{N^f_k}, while for the inverse problem D_k also contains measurements of the sought solution u_k: D_k = {{((x^i_k, y^i_k), f^i_k)}_{i=1}^{N^f_k}, {((x^i_k, y^i_k), u^i_k)}_{i=1}^{N^u_k}}. For the training in the forward problem, each sample of f is resolved on a 50 × 50 uniform mesh on the 2-D spatial domain (0, 2π) × (0, 2π), with the boundary excluded. For the inverse problem, the same 10,000 samples of f are used, but this time they are resolved on a 21 × 21 uniform mesh. In addition, for each task T_k, measurements of u_k on a 6 × 6 uniform mesh are available. The reference solutions and the measurements of u are generated by solving Eq. (4.12) with λ_k² = ∫_{[0,2π]²} f_k²(x, y) dx dy, using the finite difference method with a five-point stencil. For the downstream tasks, 100 random measurements of f are available for the forward problem, and 50 random measurements of f and 10 random measurements of u are available for the inverse problem. The noise is assumed to be independent additive Gaussian noise with noise scale 0.05.
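A sketch of such a reference solver, assuming zero Dirichlet boundary conditions and a uniform interior grid; assembling the discrete Laplacian by Kronecker sums is one standard way to realize the five-point stencil:

    import numpy as np

    def solve_helmholtz(f, lam2, L=2 * np.pi):
        n = f.shape[0]                     # f sampled on an n x n interior grid
        h = L / (n + 1)
        # 1-D second-difference operator with Dirichlet boundaries
        D = (np.diag(-2.0 * np.ones(n)) + np.diag(np.ones(n - 1), 1)
             + np.diag(np.ones(n - 1), -1)) / h**2
        I = np.eye(n)
        A = lam2 * np.eye(n * n) - (np.kron(D, I) + np.kron(I, D))   # (lambda^2 - Laplacian)
        return np.linalg.solve(A, f.ravel()).reshape(n, n)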
Results are displayed in Tables 5 and 6, and in Figs. 9 and 10. As shown, the learned generator is able to produce samples of f of high quality, as well as to provide informative prior knowledge for the downstream tasks, in both the forward and inverse problems. As for the noisy case with Bayesian inference and UQ, the predicted means agree with the references and the absolute errors are mostly bounded by the predicted uncertainties. The effectiveness of our approach for few-shot physics-informed learning and its applicability to both deterministic optimization and Bayesian inference have been consistently demonstrated across the five examples.
Fig. 9. Generator learning and few-shot physics-informed learning on the stochastic Helmholtz equation (4.12). (a) Left: 1,000 training samples of a slice of f at y = π; middle: 1,000 samples of a slice of f at y = π from the learned generator; right: statistics computed from samples. (b)/(c) Results for the downstream forward problem with 100 random noisy measurements of f, using our approach with HMC. From left to right: reference, predicted mean, absolute error, and predicted uncertainty of f/u. Black crosses represent the locations of the measurements of f.
            PINN    MH-PINN
Error (%)   21.14   1.12

Table 5
L2 relative error of u for the downstream forward problem on Eq. (4.12) with clean data of f, using our approach and the PINN method.
            PINN     MH-PINN
λ           1.9328   1.0170
Error (%)   59.92    2.58

Table 6
Estimates of λ and L2 relative errors of u for the downstream inverse problem on Eq. (4.12) with clean data. The reference value of λ is 1.0042.
Fig. 10. Results for the downstream inverse problem on the stochastic Helmholtz equation (4.12), with 50 random noisy measurements of f and 10 random noisy measurements of u. λ is estimated as 1.0785 ± 0.0307 in the format of predicted mean ± predicted standard deviation, while the reference value is 1.0042. (a)/(b) From left to right: reference, predicted mean, absolute error, and predicted uncertainty of f/u. Black crosses represent the locations of the measurements of f or u.

5. Multi-task learning with multi-head neural networks. So far, we have mostly focused on using MH-NNs together with NFs to estimate stochastic generators
and learn informative prior knowledge from {T_k}_{k=1}^M. This was achieved by first training MH-NNs in an MTL fashion and then training NFs to estimate the PDF of the head. Intuitively, the capability of MH-NNs, when trained in MTL, to capture shared information is the key to their success in generative modeling and few-shot learning. For physics-informed MTL with MH-PINNs, the ODEs/PDEs are solved simultaneously, and assuming that the solutions share the same set of basis functions gives us samples of the sets of coefficients, which enables the generative modeling, followed by the few-shot learning, which is the whole point of the method proposed in this paper. However, the cost and/or benefit of imposing the same set of basis functions on all solutions has not been explicitly discussed yet. On the one hand, the shared body relates the training of the tasks, which may be helpful if the tasks are similar in certain ways. On the other hand, forcing all solutions to share the same basis functions may also be harmful when they behave differently. In particular, for tasks with sufficient data and physics, forcing them to share the same body with all other tasks may act as a negative regularization, and single-task learning (STL) may outperform MTL in terms of prediction accuracy on those specific tasks. In this section, we investigate the effect of MTL with MH-NNs and provide preliminary results and analysis by revisiting the simple function approximation example of Sec. 4.1, which, hopefully, can provide useful information and insight for future, more rigorous research.

5.1. Basis function learning and synergistic learning. As discussed before, the quality and behavior of the basis functions learned in MTL are crucial to generative modeling and to learning the relations among, and the representative information of, the tasks {T_k}_{k=1}^M. We consistently noticed across the numerical examples that the initialization of the head in MH-NNs has a great impact on the average accuracy of MTL, the learning of the basis functions, and the distribution of the head. Here, we test three initialization strategies: the random normal method with standard deviation 0.05, referred to as RN
(0.05); the Glorot uniform method [12], referred to as GU; and the random normal method with standard deviation 1, referred to as RN (1). In the downstream few-shot learning tasks, we fine-tune the head without the learned PDF, which is in fact the TL method from [7]; thereby the information from the distribution of the head is excluded, and the prediction accuracy is fully determined by the level of prior knowledge contained in the basis functions.
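For concreteness, the three strategies can be written as follows, assuming the heads are collected in an m × M matrix (m basis functions, M tasks); the Glorot limit follows the standard definition [12]:

    import numpy as np

    def init_head(m, M, method, rng):
        if method == "RN(0.05)":
            return 0.05 * rng.standard_normal((m, M))
        if method == "GU":
            limit = np.sqrt(6.0 / (m + M))           # Glorot uniform limit
            return rng.uniform(-limit, limit, size=(m, M))
        if method == "RN(1)":
            return rng.standard_normal((m, M))
        raise ValueError(method)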
As shown in Fig. 11, RN (0.05) yields the least informative basis functions, whose behavior is dominated by the hyperbolic tangent activation function of the NNs. This is further demonstrated in the downstream few-shot learning tasks using the TL method. It also provides the worst prediction accuracy on average in MTL, as presented in Table 7. GU and RN (1) perform similarly. Plots of some basis functions seemingly indicate that RN (1) yields better basis functions, whose behaviors are more similar to the family of functions displayed in Fig. 11(b); this, however, does not necessarily imply richer prior knowledge for the downstream tasks, as shown in Fig. 11(c).

It is shown empirically that, compared to the other two initialization strategies, MH-NNs with RN (0.05) deliver neither accurate MTL nor synergistic learning of the basis functions. However, we noticed that, in generative modeling, RN (0.05) performs significantly better in terms of accuracy and convergence speed. As shown in Fig. 11(d), samples from the learned generator are of higher quality. We consistently found that initializing the heads with relatively small values often led to easy and fast training of the NFs and accurate learning of the generative models. We conjecture that this happens because MH-NNs in MTL tend to store the representative and informative information in the heads when the heads are initialized with small values, and in the basis functions when the heads are initialized with relatively large values.
            RN (0.05)         GU                RN (1)
Error (%)   0.8373 ± 0.2341   0.1907 ± 0.0690   0.3131 ± 0.0937

Table 7
L2 relative errors, from MTL, for 1,000 tasks, using different initialization methods. The errors are displayed in the format of mean ± standard deviation, computed over all tasks.
5.2. Multi-Task Learning (MTL) versus Single-Task Learning (STL). As discussed earlier, MTL with MH-NNs does not necessarily result in synergistic learning, nor in higher accuracy for all tasks on average. Here, we again use the function approximation example of Sec. 4.1 to investigate the effectiveness of MTL with MH-NNs, as compared to STL. The first case we consider assumes that the data is sufficient. For that, we randomly choose 100 out of the 1,000 training samples, each of which is approximated by an NN trained independently, and compare the results with MH-NNs in terms of prediction accuracy. Note that in this case, an MH-NN is trained on 1,000 functions as before and tested on the chosen 100 functions, while a single-head NN with the same architecture is trained on the 100 functions directly. Results are shown in Table 8, from which it is verified empirically that MTL is outperformed by STL under certain circumstances, e.g., when the random normal initialization methods are used.

The second case we consider assumes that the data is sufficient for some tasks while insufficient for others. For that, we split the 1,000 tasks equally into two subsets. For the first 500 tasks, we assume that we only have 10 measurements randomly sampled on [−1, 1], while for the other 500 tasks we have the full 40 measurements equidistantly distributed on [−1, 1]. MTL with MH-NNs is performed on those 1,000 regression tasks all at once, and the tasks are treated as equal.
Fig. 11. The effect of different initialization methods for the head on basis function learning, few-shot learning, and generator learning. (a) Samples of 20 basis functions from MH-NNs, trained for approximating 1,000 f generated from Eq. (4.1), using, from left to right, the RN (0.05), GU and RN (1) initialization methods. (b) 1,000 training samples of f. (c) Results for two downstream few-shot regression tasks, using the TL method without the regularization informed by the learned PDF, as opposed to the proposed approach. (d) Results for generator learning, using, from left to right, the RN (0.05), GU and RN (1) initialization methods.
            RN (0.05)         GU                RN (1)            STL
Error (%)   0.7575 ± 0.2477   0.1362 ± 0.0259   0.3664 ± 0.1031   0.2102 ± 0.0794

Table 8
L2 relative errors of f, from MTL with MH-NNs and STL with NNs, on 100 tasks. Different initialization methods are used for the heads in MH-NNs. The errors are displayed in the format of mean ± standard deviation, computed over all 100 tasks.
The results are presented in Table 9 and Fig. 12. We can see that, compared to STL, MTL improves the prediction accuracy on tasks with insufficient data, providing empirical evidence of synergistic learning. Also, interestingly, the RN (1) initialization method, which yields the worst generative models, performs the best among all three, which agrees with our previous conjecture on basis function learning with MH-NNs: heads initialized with large values tend to force the representative and informative information to be encoded in the basis functions.
            RN (0.05)       GU              RN (1)
Error (%)   63.60 ± 24.08   40.49 ± 20.49   16.91 ± 11.08

Table 9
L2 relative errors of f, from MTL with MH-NNs, on the 500 tasks equipped with insufficient data. The errors are displayed in the format of mean ± standard deviation, computed over all 500 tasks.

Fig. 12. Results for 3 tasks with insufficient data from MTL with MH-NNs, using different initialization methods for the head, and from STL with NNs of the same architecture. We note that tasks with sufficient data and tasks with insufficient data are treated equally in MTL.

6. Discussion. We have developed multi-head neural networks (MH-NNs) for physics-informed machine learning, and proposed multi-head physics-informed neural networks (MH-PINNs) as a new method, implemented in the L-HYDRA code. The primary focus of this work is on MH-NNs and MH-PINNs for various learning
problems in scientific machine learning, including multi-task learning (MTL), stochastic process approximation, and few-shot regression learning. We first formulated the problem in Eq. (2.1), introduced the architecture design of MH-PINNs, and proposed a method to transform MH-NNs and MH-PINNs into generative models with the help of normalizing flows (NFs) for density estimation and generative modeling. We then studied the applicability and capabilities of MH-PINNs in solving ordinary/partial differential equations (ODEs/PDEs) as well as in approximating stochastic processes. We completed the paper with preliminary, empirical explorations of MH-NNs in synergistic learning, and examined the potential benefits and costs of MTL with MH-NNs.

This paper can be used in various ways: it proposes an NN approach for MTL in solving ODEs/PDEs; it provides a new approach to approximating stochastic processes; it presents a method to address few-shot physics-informed learning problems, which are often encountered in the context of meta-learning and transfer learning; it contains a systematic study of applying MH-NNs to scientific computing problems; and it presents the first empirical evidence of synergistic learning.

However, there are a few major problems concerning MH-NNs that we did not address. One is the expressivity of MH-NNs, or more generally of hard-parameter-sharing NNs, in approximating complicated stochastic processes. Intuitively, if two functions behave very differently, forcing them to share the same basis functions would adversely affect the approximation accuracy. The second problem is the balancing of the different terms in the loss function in MTL. It is shown in the literature [29] that PINNs, trained in single-task learning, are already deeply influenced by the weights in front of the different terms in the loss function, e.g., the data loss, the boundary condition loss, and the PDE residual loss. This issue may be more complex in training MH-PINNs, because in MTL the loss function is commonly defined as a weighted summation of task-specific losses. The last major problem is MH-PINNs for synergistic learning. In this paper, we only studied one example in function approximation and presented empirical evidence. More work on the understanding of synergistic learning with MH-PINNs, along both theoretical and computational directions, should be pursued in the future.
2573
+ Acknowledgments. We would like to thank Professor Xuhui Meng of Huazhong University of Science and Technology for helpful discussions. This work was supported by the Vannevar Bush Faculty Fellowship award (GEK) from ONR (N00014-22-1-2795); by the U.S. Department of Energy, Advanced Scientific Computing Research program, under the Scalable, Efficient and Accelerated Causal Reasoning Operators, Graphs and Spikes for Earth and Embedded Systems (SEA-CROGS) project, DE-SC0023191; and by the MURI/AFOSR FA9550-20-1-0358 project.
+ REFERENCES
2632
+ [1] M. Abadi, A. Agarwal, P. Barham, et al., TensorFlow: Large-scale machine learning
2633
+ on heterogeneous systems, 2015, https://www.tensorflow.org/.
2634
+ Software available from
2635
+ tensorflow.org.
2636
+ [2] M. J. Ablowitz and A. Zeppetella, Explicit solutions of Fisher’s equation for a special wave
2637
+ speed, Bulletin of Mathematical Biology, 41 (1979), pp. 835–840.
2638
+ [3] B. Bahmani and W. Sun, Training multi-objective/multi-task collocation physics-informed
2639
+ neural network with student/teachers transfer learnings, arXiv preprint arXiv:2107.11496,
2640
+ (2021).
2641
+ [4] K. Bhattacharya, B. Hosseini, N. B. Kovachki, and A. M. Stuart, Model reduction and
2642
+ neural networks for parametric PDEs, arXiv preprint arXiv:2005.03180, (2020).
2643
+ [5] R. Caruana, Multitask Learning: A Knowledge-Based Source of Inductive Bias, in Interna-
2644
+ tional Conference on Machine Learning, 1993.
2645
+ [6] X. Chen, J. Duan, and G. E. Karniadakis, Learning and meta-learning of stochastic
2646
+ advection–diffusion–reaction systems from sparse measurements, European Journal of Ap-
2647
+ plied Mathematics, 32 (2021), pp. 397–420.
2648
+ [7] S. Desai, M. Mattheakis, H. Joy, P. Protopapas, and S. Roberts, One-shot transfer
2649
+ learning of physics-informed neural networks, arXiv preprint arXiv:2110.11286, (2021).
2650
+ [8] J. V. Dillon, I. Langmore, D. Tran, E. Brevdo, S. Vasudevan, D. Moore, B. Patton,
2651
+ A. Alemi, M. Hoffman, and R. A. Saurous, TensorFlow Distributions, arXiv preprint
2652
+ arXiv:1711.10604, (2017).
2653
+ [9] L. Dinh, J. Sohl-Dickstein, and S. Bengio, Density Estimation Using Real NVP, arXiv
2654
+ preprint arXiv:1605.08803, (2016).
2655
+ [10] C. Finn, P. Abbeel, and S. Levine, Model-agnostic meta-learning for fast adaptation of deep
2656
+ networks, in International Conference on Machine Learning, PMLR, 2017, pp. 1126–1135.
2657
+ [11] M. Germain, K. Gregor, I. Murray, and H. Larochelle, MADE: Masked autoencoder for
2658
+ distribution estimation, in International Conference on Machine Learning, PMLR, 2015,
2659
+ pp. 881–889.
2660
+ [12] X. Glorot and Y. Bengio, Understanding the difficulty of training deep feedforward neural
2661
+ networks, in Proceedings of the thirteenth international conference on artificial intelligence
2662
+ and statistics, JMLR Workshop and Conference Proceedings, 2010, pp. 249–256.
2663
+ [13] I. Goodfellow, J. Pouget-Abadie, M. Mirza, B. Xu, D. Warde-Farley, S. Ozair,
2664
+ A. Courville, and Y. Bengio, Generative adversarial networks, Communications of the
2665
+ ACM, 63 (2020), pp. 139–144.
2666
+ [14] S. Goswami, K. Kontolati, M. D. Shields, and G. E. Karniadakis, Deep transfer oper-
2667
+ ator learning for partial differential equations under conditional shift, Nature Machine
2668
+ Intelligence, 4 (2022), pp. 1155–1164.
2669
+ [15] L. Guo, H. Wu, and T. Zhou, Normalizing field flows: Solving forward and inverse stochas-
2670
+ tic differential equations using physics-informed flow models, Journal of Computational
2671
+ Physics, 461 (2022), p. 111202.
2672
+ [16] J. Ho, A. Jain, and P. Abbeel, Denoising diffusion probabilistic models, Advances in Neural
2673
+ Information Processing Systems, 33 (2020), pp. 6840–6851.
2674
+ [17] G. E. Karniadakis, I. G. Kevrekidis, L. Lu, P. Perdikaris, S. Wang, and L. Yang, Physics-
2675
+ informed machine learning, Nature Reviews Physics, 3 (2021), pp. 422–440.
2676
+ [18] R. E. Kass, The validity of posterior expansions based on Laplace’s method, Bayesian and
2677
+ Likelihood Methods in Statistics and Econometrics, (1990), pp. 473–488.
2678
+ [19] Y. Khoo, J. Lu, and L. Ying, Solving parametric PDE problems with artificial neural net-
2679
+ works, European Journal of Applied Mathematics, 32 (2021), pp. 421–435.
2680
+ [20] D. P. Kingma, T. Salimans, R. Jozefowicz, X. Chen, I. Sutskever, and M. Welling,
2681
+ Improved variational inference with inverse autoregressive flow, Advances in Neural Infor-
2682
+ mation Processing Systems, 29 (2016).
2683
+ [21] D. P. Kingma and M. Welling, Auto-encoding variational Bayes, arXiv preprint arXiv:1312.6114, (2013).
2695
+ [22] I. Kobyzev, S. J. Prince, and M. A. Brubaker, Normalizing flows: An introduction and re-
2696
+ view of current methods, IEEE Transactions on Pattern Analysis and Machine Intelligence,
2700
+ 43 (2020), pp. 3964–3979.
2701
+ [23] J. Lao, C. Suter, I. Langmore, C. Chimisov, A. Saxena, P. Sountsov, D. Moore, R. A.
2702
+ Saurous, M. D. Hoffman, and J. V. Dillon, tfp.mcmc: Modern Markov chain Monte
2703
+ Carlo tools built for modern hardware, arXiv preprint arXiv:2002.01184, (2020).
2704
+ [24] Z. Li, N. Kovachki, K. Azizzadenesheli, B. Liu, K. Bhattacharya, A. Stuart, and
2705
+ A. Anandkumar, Fourier neural operator for parametric partial differential equations,
2706
+ arXiv preprint arXiv:2010.08895, (2020).
2707
+ [25] Z. Lin, Z. Zhao, Z. Zhang, H. Baoxing, and J. Yuan, To Learn Effective Features: Un-
2708
+ derstanding the Task-Specific Adaptation of MAML, 2021, https://openreview.net/forum?
2709
+ id=FPpZrRfz6Ss.
2710
+ [26] K. Linka, A. Schafer, X. Meng, Z. Zou, G. E. Karniadakis, and E. Kuhl, Bayesian
2711
+ Physics-Informed Neural Networks for real-world nonlinear dynamical systems, arXiv pre-
2712
+ print arXiv:2205.08304, (2022).
2713
+ [27] X. Liu, X. Zhang, W. Peng, W. Zhou, and W. Yao, A novel meta-learning initialization
2714
+ method for physics-informed neural networks, Neural Computing and Applications, (2022),
2715
+ pp. 1–24.
2716
+ [28] L. Lu, P. Jin, G. Pang, Z. Zhang, and G. E. Karniadakis, Learning nonlinear operators
2717
+ via DeepONet based on the universal approximation theorem of operators, Nature Machine
2718
+ Intelligence, 3 (2021), pp. 218–229.
2719
+ [29] L. McClenny and U. Braga-Neto, Self-adaptive physics-informed neural networks using a
2720
+ soft attention mechanism, arXiv preprint arXiv:2009.04544, (2020).
2721
+ [30] X. Meng, L. Yang, Z. Mao, J. del Águila Ferrandis, and G. E. Karniadakis, Learning
2722
+ functional priors and posteriors from data and physics, Journal of Computational Physics,
2723
+ 457 (2022), p. 111073.
2724
+ [31] R. M. Neal et al., MCMC using Hamiltonian dynamics, Handbook of Markov Chain Monte
2725
+ Carlo, 2 (2011), p. 2.
2726
+ [32] G. Papamakarios, E. T. Nalisnick, D. J. Rezende, S. Mohamed, and B. Lakshmi-
2727
+ narayanan, Normalizing Flows for Probabilistic Modeling and Inference., J. Mach. Learn.
2728
+ Res., 22 (2021), pp. 1–64.
2729
+ [33] G. Papamakarios, T. Pavlakou, and I. Murray, Masked autoregressive flow for density
2730
+ estimation, Advances in Neural Information Processing Systems, 30 (2017).
2731
+ [34] M. Penwarden, S. Zhe, A. Narayan, and R. M. Kirby, Physics-informed neural net-
2732
+ works (PINNs) for parameterized PDEs:
2733
+ A metalearning approach, arXiv preprint
2734
+ arXiv:2110.13361, (2021).
2735
+ [35] P. Perdikaris, D. Venturi, and G. E. Karniadakis, Multifidelity information fusion algo-
2736
+ rithms for high-dimensional systems and massive data sets, SIAM Journal on Scientific
2737
+ Computing, 38 (2016), pp. B521–B538.
2738
+ [36] A. F. Psaros, X. Meng, Z. Zou, L. Guo, and G. E. Karniadakis, Uncertainty Quantifica-
2739
+ tion in Scientific Machine Learning: Methods, Metrics, and Comparisons, arXiv preprint
2740
+ arXiv:2201.07766, (2022).
2741
+ [37] A. Raghu, M. Raghu, S. Bengio, and O. Vinyals, Rapid Learning or Feature Reuse? To-
2742
+ wards Understanding the Effectiveness of MAML, arXiv preprint arXiv:1909.09157, (2019).
2743
+ [38] M. Raissi, P. Perdikaris, and G. E. Karniadakis, Physics-informed neural networks: A deep
2744
+ learning framework for solving forward and inverse problems involving nonlinear partial
2745
+ differential equations, Journal of Computational Physics, 378 (2019), pp. 686–707.
2746
+ [39] S. Ruder, An overview of multi-task learning in deep neural networks, arXiv preprint
2747
+ arXiv:1706.05098, (2017).
2748
+ [40] P. Thanasutives, M. Numao, and K.-i. Fukui, Adversarial multi-task learning enhanced
2749
+ physics-informed neural networks for solving partial differential equations, in 2021 Inter-
2750
+ national Joint Conference on Neural Networks (IJCNN), IEEE, 2021, pp. 1–9.
2751
+ [41] H. Wang, H. Zhao, and B. Li, Bridging multi-task learning and meta-learning: Towards effi-
2752
+ cient training and effective adaptation, in International Conference on Machine Learning,
2753
+ PMLR, 2021, pp. 10991–11002.
2754
+ [42] S. Wang, H. Wang, and P. Perdikaris, Learning the solution operator of parametric par-
2755
+ tial differential equations with physics-informed DeepONets, Science Advances, 7 (2021),
2756
+ p. eabi8605.
2757
+ [43] L. Yang, X. Meng, and G. E. Karniadakis, B-PINNs: Bayesian physics-informed neural
2758
+ networks for forward and inverse PDE problems with noisy data, Journal of Computational
2759
+ Physics, 425 (2021), p. 109913.
2760
+ [44] L. Yang, D. Zhang, and G. E. Karniadakis, Physics-informed generative adversarial net-
2761
+ works for stochastic differential equations, SIAM Journal on Scientific Computing, 42
2762
+ (2020), pp. A292–A317.
2766
+ [45] M. Yang and J. T. Foster, Multi-output physics-informed neural networks for forward and
2767
+ inverse PDE problems with uncertainties, Computer Methods in Applied Mechanics and
2768
+ Engineering, (2022), p. 115041.
2769
+ [46] W. Zhong and H. Meidani, PI-VAE: Physics-Informed Variational Auto-Encoder for stochas-
2770
+ tic differential equations, arXiv preprint arXiv:2203.11363, (2022).
2771
+ [47] Z. Zou, X. Meng, A. F. Psaros, and G. E. Karniadakis, NeuralUQ: A comprehensive library
2772
+ for uncertainty quantification in neural differential equations and operators, arXiv preprint
2773
+ arXiv:2208.11866, (2022).
2774
+ Appendix A. Details of NN architectures and training hyperparam-
2775
+ eters.
2776
+ For all examples in Secs. 4 and 5, MH-PINNs are implemented as fully-
2777
+ connected NNs (FNNs) with 3 nonlinear hidden layers, each of which is equipped
2778
+ with 50 neurons and hyperbolic tangent activation function. The number of heads is
2779
+ the same as the number of tasks in the corresponding examples: 1,000 in Sec. 4.1,
+ 2,000 in Secs. 4.2, 4.3 and 4.4, and 10,000 in Sec. 4.5. Weights in the body of MH-
2781
+ PINNs are initialized with Glorot uniform initialization [12] and biases are initialized
2782
+ with zero, while heads are initialized by sampling from a random normal distribution
+ with a standard deviation of 0.05, for fast training of NFs and better performance of the
2784
+ learned generators.
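+ For concreteness, a minimal sketch of this architecture in TensorFlow [1] is given
+ below: a shared fully-connected body with 3 tanh hidden layers of 50 neurons, followed
+ by one linear head per task. The function and variable names, as well as the scalar
+ input dimension, are illustrative assumptions, not taken from the L-HYDRA code.
+
+ import tensorflow as tf
+
+ def build_mh_nn(num_heads, input_dim=1, width=50, depth=3):
+     # Shared body: the basis functions learned across all tasks.
+     inputs = tf.keras.Input(shape=(input_dim,))
+     h = inputs
+     for _ in range(depth):
+         h = tf.keras.layers.Dense(width, activation="tanh")(h)
+     # One linear head per task, each consuming the shared basis.
+     outputs = [tf.keras.layers.Dense(1, name=f"head_{i}")(h)
+                for i in range(num_heads)]
+     return tf.keras.Model(inputs=inputs, outputs=outputs)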
2785
+ Except for the forward problem in Sec. 4.2, NFs in this paper are chosen to be
2786
+ MAF [33] with 10 bijectors, i.e., the invertible maps in NFs, each of which is a MADE
2787
+ [11], a NN with masked dense layers, with two nonlinear hidden layers equipped with
2788
+ 100 neurons and ReLU activation function. The RealNVP [9] and IAF [20] used in
2789
+ the forward problem in Sec. 4.2 also have 10 bijectors, each of which is a NN with
2790
+ two nonlinear hidden layers equipped with 100 neurons and ReLU activation function.
2791
+ The implementation mostly follows the instructions of TensorFlow Probability library
2792
+ [8] for NFs.
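+ As a rough guide, such a MAF density estimator can be assembled with TensorFlow
+ Probability [8] along the following lines; this is a simplified sketch (the tanh clamping
+ of the log-scale mentioned below is omitted), and the standard normal base distribution
+ is an assumption.
+
+ import tensorflow_probability as tfp
+ tfd, tfb = tfp.distributions, tfp.bijectors
+
+ def build_maf(event_dim, num_bijectors=10):
+     bijectors = []
+     for _ in range(num_bijectors):
+         made = tfb.AutoregressiveNetwork(  # a MADE network [11]
+             params=2, hidden_units=[100, 100], activation="relu")
+         bijectors.append(tfb.MaskedAutoregressiveFlow(shift_and_log_scale_fn=made))
+         bijectors.append(tfb.Permute(permutation=list(reversed(range(event_dim)))))
+     return tfd.TransformedDistribution(
+         distribution=tfd.Sample(tfd.Normal(0.0, 1.0), sample_shape=[event_dim]),
+         bijector=tfb.Chain(bijectors[:-1]))  # drop the trailing permutation
+
+ The resulting distribution is trained by maximizing log_prob on the learned heads, and
+ sampled to generate new heads.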
2793
+ PI-GANs [44] implemented in Sec. 4.2 have the following architecture: the dis-
2794
+ criminator is a FNN with 3 nonlinear hidden layers, each of which is equipped with
2795
+ 128 neurons and Leaky ReLU activation function; the generator that takes as input
2796
+ t is a FNN with 3 nonlinear hidden layers, each of which is equipped with 50 neu-
2797
+ rons and hyperbolic tangent activation function; the other generator takes as input a
2798
+ Gaussian random variable in 50 dimensions with zero mean and identity covariance
2799
+ matrix, and is implemented as a FNN with 3 nonlinear hidden layers, each of which
2800
+ has 128 neurons and hyperbolic tangent activation function. The input dimensions of
2801
+ those 3 FNNs are 65, 1 and 50, and the output dimensions are 1, 50, 50, respectively.
2802
+ For the training of MH-PINNs, full-batch training is deployed with the Adam opti-
+ mizer for 50,000 iterations. For the training of NFs, except for the forward problem
+ in Sec. 4.2, mini-batch training is deployed with a batch size of 100 and the Adam op-
+ timizer for 1,000 epochs.
2806
+ NFs in the forward problem in Sec. 4.2 are trained for
2807
+ 500 epochs instead, and L2 regularization is imposed on the parameters of RealNVP
2808
+ for better performance. For all NFs, to achieve stable training, a hyperbolic tangent
2809
+ function is imposed on the logarithm of the scale, computed from each bijector, such
2810
+ that the logarithm of the scale lies in (−1, 1). For the training of PI-GANs, mini-
+ batch training is deployed with a batch size of 100 and the Adam optimizer for 100,000
2812
+ iterations. Besides, the same as in [44, 30], physics-informed Wasserstein GANs (PI-
2813
+ WGANs) with gradient penalty are employed, in which the coefficient for gradient
2814
+ penalty is set to be 0.1. Iteratively, 5 updates of the discriminator are performed and
2815
+ followed by 1 update of the generators. Except in training PI-GANs, the learning
2816
+ rate of Adam optimizer is set to be 10−3 and other hyperparameters of Adam are set
2817
+ as default. In training PI-GANs, the learning rate is set to be 10−4, β1 = 0.5 and
2818
+ β2 = 0.9 in Adam optimizer for both discriminator and generators.
2819
+
2820
+ 24
2821
+ Z. ZOU AND G. E. KARNIADAKIS
2822
+ Training of MH-PINNs, NFs, and PI-GANs was all performed on a single NVIDIA
2823
+ TITAN Xp GPU. The L-HYDRA code for TensorFlow implementation along with
2824
+ some representative examples will be released on GitHub once the paper is accepted.
2825
+ Appendix B. Details for performing Bayesian inference.
2826
+ Hamiltonian
2827
+ Monte Carlo (HMC) [31] is employed in all Bayesian inference examples for uncer-
2828
+ tainty quantification (UQ) while Laplace approximation [18] is only employed in the
2829
+ first example. In this paper, HMC with adaptive step size [23] is used, in which the
2830
+ initial step size is set to be either 0.1 or 0.01, tuned for better acceptance rate. The
2831
+ number of burn-in samples and the number of posterior samples are both set to 1,000.
2832
+ The number of steps for the leapfrog scheme is set to be either 30 or 50, also tuned for
2833
+ better acceptance rate. NeuralUQ library [47] for UQ in scientific machine learning is
2834
+ used as a tool for physics-informed Bayesian inference in the downstream tasks. The
2835
+ ideal acceptance rate in HMC, as discussed in [30, 47], is around 60%. In this paper,
2836
+ we found chains with acceptance rates between 50% and 80% acceptable.
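+ For reference, a minimal sketch of this HMC setup with the TensorFlow Probability
+ MCMC tools [23] is shown below; the target log-probability function and initial state
+ are placeholders supplied by the downstream inference task, and the number of
+ adaptation steps is an assumption not specified in the paper.
+
+ import tensorflow_probability as tfp
+
+ def run_hmc(target_log_prob_fn, initial_state, step_size=0.1, leapfrog_steps=30):
+     # HMC kernel wrapped with step-size adaptation during burn-in [23].
+     kernel = tfp.mcmc.SimpleStepSizeAdaptation(
+         tfp.mcmc.HamiltonianMonteCarlo(
+             target_log_prob_fn=target_log_prob_fn,
+             step_size=step_size,
+             num_leapfrog_steps=leapfrog_steps),
+         num_adaptation_steps=800)  # assumed; adapts within the 1,000 burn-in samples
+     return tfp.mcmc.sample_chain(
+         num_results=1000, num_burnin_steps=1000,
+         current_state=initial_state, kernel=kernel,
+         trace_fn=lambda _, pkr: pkr.inner_results.is_accepted)
+
+ Averaging the traced acceptance indicator then checks that the chain falls in the
+ 50%-80% acceptance range deemed acceptable above.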
2837
+
5dA0T4oBgHgl3EQfNv_S/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
69E3T4oBgHgl3EQfpwru/content/2301.04646v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5af451f44872e9d8480d3aded8c352825edffe7a8f78529a87613adbfedc7de4
3
+ size 3092086
69E3T4oBgHgl3EQfpwru/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:358a61999098c8ea302f5a1f035735a3bbb6047fd855f59b3562c193cc3c5145
3
+ size 3407917
69E3T4oBgHgl3EQfpwru/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2c49b0ae81857550ecb295363f645b2341943e48f2622edae4e333d6dd1556f6
3
+ size 112015
79FAT4oBgHgl3EQfoh2y/content/2301.08635v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e9e9382154a8f7a3228f743b0be0c75747a5fd42b3a68b0f4028cee2ded8721d
3
+ size 1744595
79FAT4oBgHgl3EQfoh2y/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dd7d4e6c63b690923b5212d2dbc5d5a1a9dac937e3d1109e99a85f43cb693bb4
3
+ size 4849709
79FAT4oBgHgl3EQfoh2y/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0b15c7d4558d5e294860d52f9fb90e5f0b5e36a56c46e78665f2141077aa08b8
3
+ size 177567
7NE3T4oBgHgl3EQfRgki/content/2301.04421v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:808e71d848b7b5ddbfebc0f31092210c3474a7a594be684b533bdd34c1712b77
3
+ size 883144
7NE3T4oBgHgl3EQfRgki/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:01dc251a7fbda5ef5918da14def17b952fb37cede207da3438232f903c7a51e0
3
+ size 3342381
7NE3T4oBgHgl3EQfRgki/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bd31d336596e4f4b711c6b6297261fb2df8ad45a96eaedad0031957380dbec22
3
+ size 119746
7NFJT4oBgHgl3EQfmSym/content/tmp_files/2301.11587v1.pdf.txt ADDED
@@ -0,0 +1,1225 @@
1
+ Matching of Everyday Power Supply and Demand with Dynamic Pricing:
2
+ Problem Formalisation and Conceptual Analysis
3
+ Thibaut Théate^{a,∗}, Antonio Sutera^{b}, Damien Ernst^{a,c}
+ ^{a} Department of Electrical Engineering and Computer Science, University of Liège, Liège, Belgium
+ ^{b} Haulogy, Intelligent Systems Solutions, Braine-le-Comte, Belgium
+ ^{c} Information Processing and Communications Laboratory, Institut Polytechnique de Paris, Paris, France
7
+ Abstract
8
+ The energy transition is expected to significantly increase the share of renewable energy sources whose production is
9
+ intermittent in the electricity mix. Apart from key benefits, this development has the major drawback of generating a
10
+ mismatch between power supply and demand. The innovative dynamic pricing approach may significantly contribute to
11
+ mitigating that critical problem by taking advantage of the flexibility offered by the demand side. At its core, this idea
12
+ consists in providing the consumer with a price signal which is evolving over time, in order to influence its consumption.
13
+ This novel approach involves a challenging decision-making problem that can be summarised as follows: how to determine
14
+ a price signal maximising the synchronisation between power supply and demand under the constraints of maintaining
15
+ the producer/retailer’s profitability and benefiting the final consumer at the same time? As a contribution, this research
16
+ work presents a detailed formalisation of this particular decision-making problem. Moreover, the paper discusses the
17
+ diverse algorithmic components required to efficiently design a dynamic pricing policy: different forecasting models
18
+ together with an accurate statistical modelling of the demand response to dynamic prices.
19
+ Keywords:
20
+ Matching of supply and demand, dynamic pricing, demand response, power producer/retailer.
21
+ 1. Introduction
22
+ Climate change is undeniably a major challenge facing
23
+ humanity in the 21st century [1].
24
+ An ambitious trans-
25
+ formation is required in all sectors to significantly lower
26
+ their respective carbon footprints. Electricity generation
27
+ is no exception, with the burning of fossil fuels, mainly coal
28
+ and gas, being by far the dominant power source in the
29
+ world today [2]. This sector has to undergo an important
30
+ transformation of the global electricity mix by promoting
31
+ power sources with a significantly lower carbon footprint.
32
+ Belonging to that category are nuclear power, hydroelec-
33
+ tricity, biomass or geothermal energy which are relatively
34
+ controllable, but also the energy directly extracted from
35
+ wind and sun which is conversely intermittent in nature.
36
+ Since wind turbines and photovoltaic panels are expected
37
+ to play a key role in the energy transition, solutions are
38
+ required to address their variable production. Interesting
39
+ technical avenues are the interconnection of power grids [3]
40
+ and the development of storage capacities such as battery,
41
+ pumped hydroelectricity or hydrogen [4]. Another promis-
42
+ ing and innovative solution is to influence the behaviour
43
+ of consumers through the use of dynamic pricing (DP), so
44
+ that power supply and demand are better synchronised.
45
+ ∗Corresponding author.
46
+ Email addresses: thibaut.theate@uliege.be (Thibaut
47
+ Théate), dernst@uliege.be (Damien Ernst)
48
+ The dynamic pricing approach consists in continuously
49
+ adapting the electricity price that the final consumer has
50
+ to pay in order to influence its consumption behaviour.
51
+ Basically, when demand exceeds supply, the power price
52
+ would be increased in order to take down consumption.
53
+ Conversely, a reduced price would be provided when there
54
+ is excessive production compared to consumption. From a
55
+ graphical perspective, the objective is not only to shift the
56
+ daily consumption curve but also to change its shape in
57
+ order to better overlap with the intermittent production
58
+ curve of renewable energy sources. This is illustrated in
59
+ Figure 1 for a representative situation.
60
+ The innovative dynamic pricing approach relies on two
61
+ important assumptions. Firstly, the final consumer has to
62
+ be equipped with a smart metering device to measure its
63
+ consumption in real-time and with communication means
64
+ for the price signal. Secondly, the final consumer has to
65
+ be able to provide a certain amount of flexibility regarding
66
+ its power consumption. Moreover, it has to be sufficiently
67
+ receptive to the incentives offered to reduce its electricity
68
+ bill in exchange for a behaviour change. If these require-
69
+ ments are met, the major strength of the dynamic pricing
70
+ approach is its potential benefits for both the consumer
71
+ and the producer/retailer. Moreover, these benefits would
72
+ not only be in terms of economy, but also potentially in
73
+ terms of ecology and autonomy. In fact, dynamic prices
74
+ reward the flexibility of the demand side.
89
+ Figure 1: Illustration of the dynamic pricing approach’s potential
90
+ to shift and change the shape of a typical daily consumption curve
91
+ (blue) so that there is a better synchronisation with the daily inter-
92
+ mittent production curve of renewable energy sources (red).
93
+ The contributions of this research work are twofold.
94
+ Firstly, the complex decision-making problem faced by
95
+ a producer/retailer willing to develop a dynamic pricing
96
+ strategy is presented and rigorously formalised. Secondly,
97
+ the diverse algorithmic components required to efficiently
98
+ design a dynamic pricing policy are thoroughly discussed.
99
+ To the authors’ knowledge, demand response via dynamic
100
+ pricing has received considerable attention from the re-
101
+ search community, but from the perspective of the demand
102
+ side alone. Therefore, the present research may be consid-
+ ered as pioneering work studying dynamic pricing from the
104
+ perspective of the supply side for taking advantage of the
105
+ flexibility of the power consumers.
106
+ This research paper is structured as follows. First of
107
+ all, the scientific literature about both dynamic pricing
108
+ and demand response is concisely reviewed in Section 2.
109
+ Then, Section 3 presents a detailed formalisation of the
110
+ decision-making problem behind the novel dynamic pric-
111
+ ing approach. Afterwards, Section 4 analyses the algorith-
112
+ mic components necessary for the development of dynamic
113
+ pricing policies. Subsequently, a fair performance assess-
114
+ ment methodology is introduced in Section 5 to quanti-
115
+ tatively evaluate the performance of a dynamic pricing
116
+ policy. To end this paper, Section 6 discusses interesting
117
+ research avenues for future work and draws conclusions.
118
+ 2. Literature review
119
+ Over the last decade, the management of the demand
120
+ side in the scope of the energy transition has received in-
121
+ creasing attention from the research community. In fact,
122
+ there exist multiple generic approaches when it comes to
123
+ demand response. Without getting into too many details,
124
+ the scientific literature includes some surveys summarising
125
+ and discussing the different techniques available together
126
+ with their associated challenges and benefits [5, 6, 7, 8, 9].
127
+ In this research work, the focus is exclusively set on the
128
+ demand response induced by dynamic power prices.
129
+ As previously mentioned, the scientific literature about
130
+ demand response via dynamic pricing is primarily focused
131
+ on the perspective of the demand side. Multiple techniques
132
+ have already been proposed to help the consumer provide
133
+ flexibility and take advantage of behavioural changes to
134
+ lower its electricity bill. For instance, [10] presents a power
135
+ scheduling method based on a genetic algorithm to opti-
136
+ mise residential demand response via an energy manage-
137
+ ment system, so that the electricity cost is reduced. In [11],
138
+ a technique based on dynamic programming is introduced
139
+ for determining the optimal schedule of residential con-
140
+ trollable appliances in the context of time-varying power
141
+ pricing. One can also mention [12] that proposes an energy
142
+ sharing model with price-based demand response for mi-
143
+ crogrids of peer-to-peer prosumers. The approach is based
144
+ on a distributed iterative algorithm and has been shown
145
+ to lower the prosumers’ costs and improve the sharing of
146
+ photovoltaic energy. More recently, (deep) reinforcement
147
+ learning techniques have been proven to be particularly
148
+ relevant for controlling the residential demand response in
149
+ the context of dynamic power prices [13, 14].
150
+ On the contrary, the question of inducing a residential
151
+ demand response based on a dynamic pricing approach
152
+ from the perspective of the supply side has not received
153
+ a lot of attention from the research community yet. Still,
154
+ there are a few works in the scientific literature about the
155
+ mathematical modelling of the demand response caused by
156
+ dynamic power prices, which is a key element in achieving
157
+ that objective. To begin with, [15] presents a simulation
158
+ model highlighting the evolution of electricity consump-
159
+ tion profiles when shifting from a fixed tariff to dynamic
160
+ power prices. The same objective is pursued by [16] which
161
+ introduces a fully data-driven approach relying on the data
162
+ collected by smart meters and exogenous variables. The
163
+ resulting simulation model is based on consumption pro-
164
+ files clustering and conditional variational autoencoders.
165
+ Alternatively, [17] presents a functional model of residen-
166
+ tial power consumption elasticity under dynamic pricing to
167
+ assess the impact of different electricity price levels, based
168
+ on a Bayesian probabilistic approach. In addition to these
169
+ mathematical models, one can also mention some real-life
170
+ experiments conducted to assess the responsiveness of res-
171
+ idential electricity demand to dynamic pricing [18, 19].
174
+ 3. Problem formalisation
175
+ This section presents a mathematical formalisation of
176
+ the challenging sequential decision-making problem related
177
+ to the dynamic pricing approach for inducing a residential
178
+ demand response.
179
+ To begin with, the contextualisation
180
+ considered for studying this particular problem is briefly
181
+ described, followed by an overview of the decision-making
182
+ process. Then, a discretisation of the continuous timeline
183
+ is introduced. Subsequently, the formal definition of a dy-
184
+ namic pricing policy is presented. Lastly, the input and
185
+ output spaces of a dynamic pricing policy are described,
186
+ together with the objective criterion.
187
+ 3.1. Contextualisation
188
+ As previously hinted, this research work focuses on the
189
+ interesting real-case scenario of a producer/retailer whose
190
+ production portfolio is composed of an important share of
191
+ renewable energy sources such as wind turbines and photo-
192
+ voltaic panels. Because of the substantial intermittency of
193
+ these generation assets, a strong connection to the energy
194
+ markets is required in order to fully satisfy its customers
195
+ regardless of the weather. Nevertheless, the consumers are
196
+ assumed to be well informed and willing to adapt their be-
197
+ haviour in order to consume renewable energy rather than
198
+ electricity purchased on the market whose origin may be
199
+ unknown. Within this particular context, the benefits of
200
+ the dynamic pricing approach taking advantage of the con-
201
+ sumers’ flexibility are maximised. Indeed, the insignificant
202
+ marginal cost associated with these intermittent renew-
203
+ able energy sources coupled with their low carbon foot-
204
+ print make this innovative approach interesting from an
205
+ economical perspective for both supply and demand sides,
206
+ but also in terms of ecology. Moreover, the autonomy of
207
+ the producer/retailer is expected to be reinforced by low-
208
+ ering its dependence on the energy markets. At the same
209
+ time, dependence on fossil fuels may be reduced as well.
210
+ In this research work, the predicted difference between
211
+ power production and consumption is assumed to be fully
212
+ secured in the day-ahead electricity market. Also called
213
+ spot market, the day-ahead market has an hourly resolu-
214
+ tion and is operated once a day for all hours of the follow-
215
+ ing day via a single-blind auction. In other words, trading
216
+ power for hour H of day D has to be performed ahead on
217
+ day D − 1 between 00:00 AM (market opening) and 12:00
218
+ AM (market closure).
219
+ Therefore, the energy is at best
220
+ purchased 12 hours (00:00 AM of day D) up to 35 hours
221
+ (11:00 PM of day D) before the actual delivery of power.
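+ To fix ideas, the purchase lead time implied by this market design can be computed
+ as follows; this is an illustrative sketch, not part of the paper's formalisation.
+
+ def purchase_lead_time_hours(delivery_hour):
+     # Orders for every hour of day D are placed at 12:00 AM on day D-1,
+     # so delivery hour H (0-23) is secured 12 + H hours in advance.
+     assert 0 <= delivery_hour <= 23
+     return 12 + delivery_hour
+
+ # purchase_lead_time_hours(0) -> 12 hours; purchase_lead_time_hours(23) -> 35 hours.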
222
+ Apart from the day-ahead electricity market, it is assumed
223
+ that there are no trading activities on the future/forward
224
+ nor intraday markets. Nevertheless, if there remains an
225
+ eventual mismatch between production and consumption
226
+ at the time of power delivery, the producer/retailer would
227
+ be exposed to the imbalance market. In this case, the so-
228
+ called imbalance price has to be inevitably paid as com-
229
+ pensation for pushing the power grid off balance.
230
+ 3.2. Decision-making process overview
231
+ The decision-making problem studied in this research
232
+ work is characterised by a particularity: a variable time lag
233
+ between the moment a decision is made and the moment it
234
+ becomes effective. As previously explained, any remaining
235
+ difference between production and consumption after de-
236
+ mand response has to ideally be traded on the day-ahead
237
+ market. The purpose of this assumption is to limit the ex-
238
+ posure of the producer/retailer to the imbalance market.
239
+ For this reason, the price signal sent to the consumer on
240
+ day D has to be generated before the closing of the day-
241
+ ahead market on day D − 1. Additionally, it is assumed
242
+ that the price signal cannot be refreshed afterwards.
243
+ Basically, the decision-making problem at hand can be
244
+ formalised as follows. The core objective is to determine
245
+ a decision-making policy, denoted Π, mapping at time τ
246
+ input information of diverse nature Iτ to the electricity
247
+ price signal Sτ to be sent to the consumers over a future
248
+ time horizon well-defined:
249
+ Sτ = Π(Iτ),        (1)
251
+ where:
252
+ • Iτ represents the information vector gathering all the
253
+ available information (of diverse nature) at time τ
254
+ which may be helpful to make a relevant dynamic
255
+ pricing decision,
256
+ • Sτ represents a set of electricity prices generated at
257
+ time τ and shaping the dynamic price signal over a
258
+ well-defined future time horizon.
259
+ The dynamic pricing approach from the perspective of
260
+ the supply side belongs to a particular class of decision-
261
+ making problems:
262
+ automated planning and scheduling.
263
+ Contrary to conventional decision-making, which outputs one
264
+ action at a time, planning decision-making is concerned
265
+ with the generation of a sequence of actions.
266
+ In other
267
+ words, a planning decision-making problem requires to
268
+ synthesise in advance a strategy or plan of actions over
269
+ a certain time horizon. Formally, the decision-making has
270
+ to be performed at a specific time τ about a control vari-
271
+ able over a future time horizon beginning at time τi > τ
272
+ and ending at time τf > τi. In this case, the decision-
273
+ making is assumed to be performed just before the closing
274
+ of the day-ahead market at 12:00 AM to determine the
275
+ price signal to be sent to the consumers throughout the
276
+ entire following day (from 00:00 AM to 11:59 PM).
277
+ In the next sections, a more accurate and thorough
278
+ mathematical formalisation of the dynamic pricing prob-
279
+ lem from the perspective of the supply side is presented.
280
+ Moreover, the planning problem previously introduced is
281
+ cast into a sequential decision-making problem. Indeed,
282
+ this research paper intends to focus on a decision-making
283
+ policy outputting a single price from the signal Sτ at a
284
+ time based on a subset of the information vector Iτ.
285
+ [Figure 2 diagram: forecasts of production, consumption and day-ahead prices are produced before the closing of the day-ahead market and feed the dynamic pricing policy π, whose price signal enters a demand response model; axes (Time, Power, Price) and diagram labels omitted.]
320
+ Figure 2: Illustration of the formalised decision-making problem related to dynamic pricing from the perspective of the supply side. The
321
+ notations xt and yt represent the inputs and outputs of a dynamic pricing policy π, which are not shown concurrent on the timeline since
322
+ the decision-making occurs multiple hours before the application of the dynamic pricing signal. The time axis of the four plots represents the
323
+ complete following day for which the dynamic prices are generated. The mathematical notations p^F_t, c^F_t and λ^F_t respectively represent the forecast production, consumption and day-ahead market price for the time step t. Finally, the quantity c′_t is the predicted consumption at time step t after taking into consideration the dynamic pricing signal.
330
+ 3.3. Timeline discretisation
331
+ Theoretically, the dynamic electricity price signal sent
332
+ to the consumer could be continuously changing over time.
333
+ More realistically, this research work adopts a discretisa-
334
+ tion of the continuous timeline so that this power price
335
+ is adapted at regular intervals.
336
+ Formally, this timeline
337
+ is discretised into a number of time steps t spaced by a
338
+ constant duration ∆t. If the duration ∆t is too large, the
339
+ synchronisation improvement between supply and demand
340
+ will probably be of poor quality. Conversely, lowering the
341
+ value of the duration ∆t increases the complexity of the
342
+ decision-making process, and a too high update frequency
343
+ may even confuse the consumer. There is a trade-off to
344
+ be found concerning this important parameter. In this re-
345
+ search work, the dynamic price signal is assumed to change
346
+ once per hour, meaning that ∆t is equal to one hour. This
347
+ choice is motivated by the hourly resolution of the day-
348
+ ahead market, which has proven to be an appropriate com-
349
+ promise over the years for matching power production and
350
+ consumption. Another relevant discretisation choice could
351
+ be to have a price signal which is updated every quarter of
352
+ an hour. In the rest of this research paper, the increment
353
+ (decrement) operations t + 1 (t − 1) are used to model the
354
+ discrete transition from time step t to time step t + ∆t
355
+ (t − ∆t), for the sake of clarity.
356
+ 3.4. Dynamic pricing policy
357
+ Within the context previously described, a dynamic
358
+ pricing planning policy Π consists of the set of rules used
359
+ to make a decision regarding the future price signal sent to
360
+ the consumers over the next day. This planning policy can
361
+ be decomposed into a set of 24 dynamic pricing decision-
362
+ making policies π outputting a single electricity price for
363
+ one hour of the following day.
364
+ Mathematically, such a
365
+ dynamic pricing strategy can be defined as a programmed
366
+ policy π : X → Y, either deterministic or stochastic, which
367
+ outputs a decision yt ∈ Y for time step t based on some
368
+ input information xt ∈ X so as to maximise an objective
369
+ criterion. The input xt is derived from the information vec-
370
+ tor Iτ associated with the decision-making for time step t,
371
+ after potential preprocessing operations. The price signal
372
+ Sτ is composed of 24 dynamic pricing policy outputs yt.
373
+ In the rest of this research work, the time at which
374
+ the decision-making does occur should not be confused
375
+ with the time at which the dynamic price signal is active
376
+ (charging for energy consumption). The proposed formal-
377
+ isation assumes that the time step t refers to the time at
378
+ which the dynamic price is active, not decided. Therefore,
379
+ the decision-making of the dynamic pricing policy for time
380
+ step t (yt = π(xt)) is in fact performed hours in advance
381
+ of time step t. This complexity is illustrated in Figure 2
382
+ describing the formalised decision-making problem.
385
+ 3.5. Input of a dynamic pricing policy
386
+ The input space X of a dynamic pricing policy π com-
387
+ prises all the available information which may help to make
388
+ a relevant decision about future electricity prices so that
389
+ an appropriate demand response is induced.
390
+ Since the
391
+ decision-making occurs 12 up to 35 hours in advance of the
392
+ price signal delivery, this information mainly consists of
393
+ forecasts and estimations that are subject to uncertainty.
394
+ As depicted in Figure 2, the dynamic pricing policy input
395
+ xt ∈ X refers to the decision-making occurring at time
396
+ τ = t − h with h ∈ [12, 35] about the dynamic pricing
397
+ signal delivered to the consumer at time step t. In fact,
398
+ the quantity Iτ may be seen as the information contained
399
+ in the 24 inputs xt for t ∈ {τ + 12, ..., τ + 35}. Formally,
400
+ the input xt ∈ X is decided to be defined as follows:
401
+ x_t = {P^F_t, C^F_t, Λ^F_t, Y_t, M},        (2)
406
+ where:
407
+ • P^F_t = {p^F_{t+ϵ} ∈ R+ | ϵ = −k, ..., k} represents a set of forecasts for the power production within a time window centred around time step t and of size k,
+ • C^F_t = {c^F_{t+ϵ} ∈ R+ | ϵ = −k, ..., k} represents a set of forecasts for the power consumption within a time window centred around time step t and of size k,
+ • Λ^F_t = {λ^F_{t+ϵ} ∈ R | ϵ = −k, ..., k} represents a set of forecasts for the day-ahead market prices within a window centred around time step t and of size k,
+ • Y_t = {y_{t−ϵ} ∈ R | ϵ = 1, ..., k} represents the series of k previous values for the dynamic price signal sent to the final consumer,
+ • M is a mathematical model of the demand response to be expected from the consumption portfolio, with the required input information.
429
+ The different forecasting models and the challenging
430
+ modelling of the consumption portfolio demand response
431
+ are discussed in more detail in Section 4.
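+ As an illustration of this input structure, the sketch below assembles x_t from
+ day-ahead forecast arrays; the names, the default window size and the requirement
+ t ≥ k are assumptions made for the example.
+
+ import numpy as np
+
+ def build_policy_input(t, p_fcst, c_fcst, lam_fcst, past_prices, demand_model, k=3):
+     # Windows of size 2k+1 centred on t, plus the k most recent prices.
+     w = slice(t - k, t + k + 1)
+     return {
+         "P_F": np.asarray(p_fcst[w]),       # production forecasts
+         "C_F": np.asarray(c_fcst[w]),       # consumption forecasts
+         "Lam_F": np.asarray(lam_fcst[w]),   # day-ahead price forecasts
+         "Y": np.asarray(past_prices[-k:]),  # k previous dynamic prices
+         "M": demand_model,                  # demand response model
+     }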
432
+ 3.6. Output of a dynamic pricing policy
433
+ The output space Y of a dynamic pricing policy π only
434
+ includes the future price signal to be sent to the consumer.
435
+ Formally, the dynamic pricing policy output yt ∈ Y, which
436
+ represents the electricity price to be paid by the consumer
437
+ for its power consumption at time step t, is mathematically
438
+ defined as follows:
439
+ y_t = e_t,        (3)
441
+ where et ∈ R represents the dynamic electricity price to
442
+ be paid by the demand side for its power consumption
443
+ at time step t.
444
+ Out of the scope of this research work
445
+ is the presentation of this price signal so that the impact
446
+ on the final consumer is maximised. Indeed, the way of
447
+ communicating the output of the dynamic pricing policy
448
+ has to be adapted to the audience, be it humans with
449
+ different levels of electricity market expertise or algorithms
450
+ (energy management systems).
451
+ 3.7. Objective criterion
452
+ The dynamic pricing approach can provide multiple
453
+ benefits, in terms of economy, ecology but also autonomy.
454
+ Consequently, the objective criterion to be maximised by
455
+ a dynamic pricing policy π is not trivially determined. In
456
+ fact, several core objectives can be clearly identified:
457
+ • maximising the match between supply and demand,
458
+ • minimising the carbon footprint of power generation,
459
+ • minimising the electricity costs for the consumer,
460
+ • maximising the revenue of the producer/retailer.
461
+ Although some objectives overlap, these four criteria
462
+ are not completely compatible. For instance, maximising
463
+ the synchronisation between power supply and demand is
464
+ equivalent to minimising the carbon footprint associated
465
+ with the generation of electricity. Indeed, the production
466
+ portfolio of the producer/retailer being mainly composed
467
+ of intermittent renewable energy sources, its energy has a
468
+ reduced carbon footprint compared to the electricity that
469
+ can be purchased on the day-ahead market whose origin is
470
+ unknown. On the contrary, maximising the revenue of the
471
+ producer/retailer will obviously not lead to a minimised
472
+ electricity bill for the consumer. This research work makes
473
+ the choice to prioritise the maximisation of the synchroni-
474
+ sation between supply and demand, and equivalently the
475
+ minimisation of the carbon footprint, while translating the
476
+ other two core objectives into relevant constraints. Firstly,
477
+ the costs for the consumer have to be reduced with respect
478
+ to the situation without dynamic pricing. Secondly, the
479
+ profitability of the producer/retailer has to be guaranteed.
480
+ Formally, the objective criterion to be optimised by a
481
+ dynamic pricing policy π can be mathematically defined
482
+ as the following. First of all, the main target to evaluate
483
+ is the synchronisation between supply and demand, which
484
+ can be quantitatively assessed through the deviation ∆T .
485
+ This quantity has to ideally be minimised, and can be
486
+ mathematically expressed as follows:
487
+ ∆_T = Σ_{t=0}^{T−1} |p_t − c_t|,        (4)
493
+ where:
494
+ • t = 0 corresponds to the first electricity delivery hour
495
+ of a new day (00:00 AM),
496
+ • T is the time horizon considered, which should be a
497
+ multiple of 24 to have full days,
498
+ • pt is the actual power production (not predicted)
499
+ from the supply side at time step t,
500
+ • ct is the actual power consumption (not predicted)
501
+ from the demand side at time step t.
504
+ Afterwards, the first constraint concerning the reduced
505
+ costs for the consumer has to be modelled mathematically.
506
+ This is achieved via the electricity bill BT paid by the
507
+ consumer over the time horizon T, which can be expressed
508
+ as the following:
509
+ BT =
510
+ T −1
511
+
512
+ t=0
513
+ ct yt .
514
+ (5)
515
+ As previously explained, the consumer power bill BT
516
+ should not exceed that obtained without dynamic pricing.
517
+ In that case, the consumer is assumed to pay a price et,
518
+ which can for instance be a fixed tariff or a price indexed
519
+ on the day-ahead market price.
520
+ The situation without
521
+ dynamic pricing is discussed in more details in Section 5.
522
+ Consequently, the first constraint can be mathematically
523
+ expressed as follows:
524
+ T −1
525
+
526
+ t=0
527
+ ct yt ≤
528
+ T −1
529
+
530
+ t=0
531
+ ct et ,
532
+ (6)
533
+ where ct is the power consumption from the demand side
534
+ at time step t without dynamic pricing.
535
+ Then, the second constraint is about the profitability
536
+ of the producer/retailer, which is achieved if its revenue
537
+ exceeds its costs. The revenue RT of the producer/retailer
538
+ over the time horizon T can be mathematically expressed
539
+ as the following:
540
+ R_T = Σ_{t=0}^{T−1} [ c_t y_t − (c′_t − p^F_t) λ_t − (c_t − p_t) i_t ],        (7)
551
+ where:
552
+ • λt is the actual power price (not predicted) on the
553
+ day-ahead market at time step t,
554
+ • it is the actual imbalance price (not predicted) on
555
+ the imbalance market at time step t,
556
+ • c′_t is the predicted power consumption at time step t
558
+ after demand response to the dynamic prices, based
559
+ on the demand response mathematical model M.
560
+ The first term corresponds to the payment of the cus-
561
+ tomers for their electricity consumption. The second term
562
+ is the revenue or cost induced by the predicted mismatch
563
+ between supply and demand, which is traded on the day-
564
+ ahead market. The last term is the cost or revenue caused
565
+ by the remaining imbalance between supply and demand,
566
+ which has to be compensated in the imbalance market.
567
+ The total costs incurred by the producer/retailer at
568
+ each time step t can be decomposed into both fixed costs
569
+ FC and marginal costs MC. In this particular case, the
570
+ marginal costs of production are assumed to be negligible
571
+ since the production portfolio is composed of intermittent
572
+ renewable energy sources such as wind turbines and pho-
573
+ tovoltaic panels. Therefore, the second constraint can be
574
+ mathematically expressed as follows:
575
+ Σ_{t=0}^{T−1} [ c_t y_t − (c′_t − p^F_t) λ_t − (c_t − p_t) i_t ] ≥ FC · T.        (8)
585
+ Finally, the complete objective criterion to be opti-
586
+ mised by a dynamic pricing policy can be mathematically
587
+ expressed as follows:
588
+ minimise_π   Σ_{t=0}^{T−1} |p_t − c_t|,
+ subject to   R_T ≥ FC · T,
+              B_T ≤ Σ_{t=0}^{T−1} c_t e_t.        (9)
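+ In numerical terms, the objective and its two constraints can be checked ex post
+ from the realised hourly series; the sketch below is illustrative and assumes
+ equal-length NumPy arrays over the horizon T, with c_ref denoting the consumption
+ without dynamic pricing.
+
+ import numpy as np
+
+ def evaluate_policy(p, c, y, e, c_ref, c_prime, p_fcst, lam, imb, fixed_cost):
+     # Objective (4): deviation between realised supply and demand.
+     deviation = np.abs(p - c).sum()
+     # Constraint (6): consumer bill under dynamic prices vs. reference tariff.
+     bill_ok = (c * y).sum() <= (c_ref * e).sum()
+     # Constraints (7)-(8): producer/retailer revenue must cover fixed costs.
+     revenue = (c * y - (c_prime - p_fcst) * lam - (c - p) * imb).sum()
+     profit_ok = revenue >= fixed_cost * len(p)
+     return deviation, bill_ok, profit_ok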
602
+ 4. Algorithmic components discussion
603
+ This section presents a thorough discussion about the
604
+ different algorithmic modules required to efficiently design
605
+ a dynamic pricing policy from the perspective of the supply
606
+ side. Firstly, the different forecasting blocks are rigorously
607
+ analysed. Secondly, the modelling of the demand response
608
+ induced by dynamic prices is discussed. Lastly, the proper
609
+ management of uncertainty is considered.
610
+ In parallel, for the sake of clarity, Figure 3 highlights
611
+ the interconnections between the different algorithmic com-
612
+ ponents in the scope of a dynamic pricing policy from the
613
+ perspective of the supply side.
614
+ Moreover, Algorithm 1
615
+ provides a thorough description of the complete decision-
616
+ making process for the dynamic pricing problem at hand.
617
+ The complexity of the variable time lag between decision-
618
+ making and application is highlighted. Assuming that the
619
+ decision-making occurs once a day at 12:00 AM just be-
620
+ fore the closing of the day-ahead market for all hours of
621
+ the following day, the dynamic price at time step t is de-
622
+ cided hours in advance at time step t − [12 + (t%24)] with
623
+ the symbol % representing the modulo operation.
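+ A one-line check of this time lag, under the paper's hourly indexing convention,
+ could read as follows (illustrative sketch):
+
+ def decision_time(t):
+     # The price for hour t is fixed at 12:00 AM of the previous day,
+     # i.e. 12 + (t % 24) hours before it becomes active.
+     return t - (12 + t % 24)
+
+ # decision_time(24) -> 12 and decision_time(47) -> 12: all 24 prices of a
+ # given day are decided at the same instant, 12 to 35 hours in advance.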
624
+ 4.1. Production forecasting
625
+ The first forecasting block to be discussed concerns the
626
+ production of intermittent renewable energy sources such
627
+ as wind turbines and photovoltaic panels. Indeed, having
628
+ access to accurate predictions about the future output of
629
+ the production portfolio is key to the performance of a
630
+ dynamic pricing policy from the perspective of the supply
631
+ side. As previously explained in Section 3.4, the forecasts
632
+ have to be available one day ahead before the closing of
633
+ the day-ahead electricity market for all hours of the fol-
634
+ lowing day. Naturally, the generation of such predictions
635
+ introduces uncertainty, a complexity that has to be taken
636
+ into account to design sound dynamic pricing policies.
637
+ [Figure 3 panels: time series of the forecast production p^F_t, consumption c^F_t and day-ahead price λ^F_t, the adjusted consumption c′_t, and the price signal y_t; axes (Time, Power, Price) omitted.]
662
+ Figure 3: Illustration of the complete decision-making process related to dynamic pricing from the perspective of the supply side, with the
663
+ connections between the different algorithmic components highlighted.
664
+ Algorithm 1 Dynamic pricing complete decision-making process
+ The decision-making occurs once per day before the closing of the day-ahead market at 12:00 AM for all hours of the next day.
+ The decision-making for the dynamic price of time step t occurs at time step t − [12 + (t%24)].
+ for τ = −12 to T − 12 do
+     Check whether the time is 12:00 AM to proceed to the decision-making.
+     if (τ + 12)%24 = 0 then
+         for t = τ + 12 to τ + 36 do
+             Gather the available information for production forecasting x^P_t = {W^F_t, A^F_t, I^P_t}.
+             Gather the available information for consumption forecasting x^C_t = {W^F_t, T_t, I^C_t}.
+             Gather the available information for day-ahead market price forecasting x^M_t = {x^P_t, x^C_t, G^F_t, M_t, I^M_t}.
+             Forecast production at time step t: p^F_t = F_P(x^P_t).
+             Forecast consumption at time step t: c^F_t = F_C(x^C_t).
+             Forecast the day-ahead market price at time step t: λ^F_t = F_M(x^M_t).
+         end for
+         for t = τ + 12 to τ + 36 do
+             Gather the input information for the dynamic pricing policy x_t = {P^F_t, C^F_t, Λ^F_t, Y_t, M}.
+             Make a dynamic pricing decision for time step t: y_t = π(x_t).
+         end for
+         Announce the dynamic prices for all hours of the following day {y_t | t = τ + 12, ..., τ + 35}.
+     end if
+ end for
719
+ Formally, the forecasting model associated with the output of the production portfolio is denoted F_P. Its input space X_P comprises every piece of information that may potentially have an impact on the generation of electricity from intermittent renewable energy sources such as wind turbines and photovoltaic panels for a certain time period. Its output space Y_P is composed of a forecast regarding the power generation from the production portfolio for that same time period. Mathematically, the forecasting model input x^P_t ∈ X_P and output y^P_t ∈ Y_P at time step t can be expressed as follows:
+ x^P_t = {W^F_t, A^F_t, I^P_t},        (10)
+ y^P_t = p^F_t,        (11)
+ where:
748
+ • W^F_t represents various weather forecasts related to the power production of intermittent renewable energy sources such as wind turbines and photovoltaic panels (wind speed/direction, solar irradiance, etc.) at the time step t,
+ • A^F_t represents predictions about the available capacity of the production portfolio at time step t, which may be impacted by scheduled maintenance, repairs, or other similar constraints,
+ • I^P_t represents any additional information that may help to accurately forecast the future power generation of the producer/retailer's production portfolio at time step t.
766
In the scientific literature, the current state of the art for forecasting the power production of intermittent renewable energy sources is mainly based on deep learning techniques, together with data cleansing processes and data augmentation approaches. The best-performing architectures are recurrent neural networks (RNNs), convolutional neural networks (CNNs) and transformers [20, 21, 22, 23, 24].
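As an illustration of such a deep learning-based forecaster, the minimal sketch below maps a window of hourly feature vectors assembled from $\{W_t^F, A_t^F, I_t^P\}$ to a production forecast $p_t^F$ with an LSTM. The architecture, window length and feature dimensions are illustrative assumptions, not choices prescribed by the cited works; analogous models can implement $F_C$ and $F_M$.

import torch
import torch.nn as nn

class ProductionForecaster(nn.Module):
    """Illustrative LSTM-based implementation of F_P (hyperparameters assumed)."""

    def __init__(self, n_features=8, hidden_size=64):
        super().__init__()
        self.lstm = nn.LSTM(n_features, hidden_size, batch_first=True)
        self.head = nn.Linear(hidden_size, 1)

    def forward(self, x):
        # x: (batch, window, n_features), features built from W^F_t, A^F_t, I^P_t
        out, _ = self.lstm(x)
        return self.head(out[:, -1])  # one production forecast p^F_t per sample

# Usage sketch: a batch of 16 samples, each a window of 24 hourly feature vectors.
model = ProductionForecaster()
p_forecast = model(torch.randn(16, 24, 8))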
4.2. Consumption forecasting

The next important forecasting model aims to accurately predict the future power demand of the consumption portfolio before any demand response phenomenon is induced. Since the main goal of a dynamic pricing policy is to maximise the synchronisation between supply and demand, electricity load forecasts are of equal importance to electricity generation predictions. Similarly to the latter, the portfolio consumption forecasts are assumed to be generated one day ahead, just before the closing of the day-ahead market, for all 24 hours of the following day. Additionally, the uncertainty associated with these predictions has to be seriously taken into account for the success of the dynamic pricing policy.
From a more formal perspective, the forecasting model responsible for predicting the future electricity load of the consumption portfolio is denoted $F_C$. Its input space $X_C$ includes all the information that may have an influence on the residential electricity consumption for a certain time period. Its output space $Y_C$ comprises a forecast of the power used by the consumption portfolio for that same time period. Mathematically, the consumption forecasting model input $x_t^C \in X_C$ and output $y_t^C \in Y_C$ at time step $t$ can be expressed as follows:

$x_t^C = \{W_t^F, T_t, I_t^C\}$,    (12)
$y_t^C = c_t^F$,    (13)
where:
• $W_t^F$ represents various weather forecasts related to the residential electricity consumption (temperature, hygrometry, etc.) at time step $t$,
• $T_t$ represents diverse characteristics related to the time step $t$ (hour, weekend, holiday, season, etc.),
• $I_t^C$ represents supplementary information that could potentially have an influence on the residential power consumption at time step $t$.
Similarly to renewable energy production forecasting, the state-of-the-art approaches for predicting the residential electricity load in the short term are mostly based on deep learning techniques with preprocessed and augmented data: RNNs, CNNs, and transformers [25, 26, 27, 28, 22].
4.3. Market price forecasting

The last forecasting block to be discussed concerns the future day-ahead electricity market prices. Contrary to the forecasting of power production and consumption, these price predictions are not critical to the success of a dynamic pricing policy from the perspective of the supply side. Still, having access to quality forecasts for the future day-ahead market prices remains important in order to satisfy the constraints related to the profitability of the producer/retailer as well as the reduced electricity costs for the consumer. Once again, the predictions are assumed to be made just before the closing of the day-ahead market. Moreover, the uncertainty associated with these forecasts has to be taken into consideration.
Formally, the forecasting model related to the future day-ahead electricity market prices is denoted $F_M$. Its input space $X_M$ includes every single piece of information which may potentially explain the future electricity price on the day-ahead market for a certain hour. Its output space $Y_M$ comprises a forecast of the day-ahead market price for that same hour. Mathematically, both forecasting model input $x_t^M \in X_M$ and output $y_t^M \in Y_M$ at time step $t$ can be expressed as follows:

$x_t^M = \{x_t^P, x_t^C, G_t^F, M_t, I_t^M\}$,    (14)
$y_t^M = \lambda_t^F$,    (15)
where:
• $G_t^F$ represents forecasts about the state of the power grid as a whole (available production capacity, transmission lines, etc.) at time step $t$,
• $M_t$ represents diverse information in various markets related to energy (power, carbon, oil, gas, coal, etc.) in neighbouring geographical areas at time step $t$,
• $I_t^M$ represents any extra piece of information that may help to predict the future electricity price on the day-ahead market at time step $t$.
Once again, the scientific literature reveals that the state-of-the-art approaches for day-ahead power market price forecasting are mostly based on innovative machine learning techniques [29, 30, 31, 32, 33].
4.4. Demand response modelling

Another essential algorithmic component is the mathematical modelling of the residential demand response to dynamic prices. In order to make relevant dynamic pricing decisions, an estimation of the impact of the electricity price on the consumer's behaviour is necessary. In fact, two important characteristics have to be studied.

The residential power consumption elasticity. This quantity measures the average percentage change of the residential power consumption in response to a percentage change in the electricity price. In other words, the elasticity captures the willingness of the consumer to adapt their behaviour when the price of electricity either increases or decreases. This elasticity is critical to the dynamic pricing approach, since it assesses the receptiveness of the consumers to dynamic prices. In fact, the residential power consumption elasticity can be considered as a quantitative indicator of the potential of the dynamic pricing approach.
The electricity load temporal dependence. Time plays an important role in power consumption. Firstly, the consumer's behaviour is highly dependent on the time of the day. The tendency to adapt this behaviour is also expected to be time-dependent. Therefore, the residential power consumption elasticity has to be a function of the time within a day, among other things. Secondly, a higher electricity price does not simply reduce the demand as with other commodities, but rather shifts part of the consumption earlier and/or later in time. This phenomenon reflects a complex temporal dependence for power consumption, which has to be accurately modelled in order to design a well-performing dynamic pricing policy.
Formally, the mathematical model of the residential demand response is denoted $M$. Its input space $X_D$ is composed of the predicted power consumption before any demand response and the dynamic prices to be sent to the consumers for several hours before and after the time period analysed, together with information about that time period. Its output space $Y_D$ comprises the predicted power consumption after demand response to dynamic prices for that same time period. Mathematically, both demand response model input $x_t^D \in X_D$ and output $y_t^D \in Y_D$ at time step $t$ can be expressed as follows:

$x_t^D = \{C_t^F, Y'_t, T_t\}$,    (16)
$y_t^D = c'_t$,    (17)

where $Y'_t = \{y_{t+\epsilon} \in \mathbb{R} \mid \epsilon = -k, \ldots, k\}$ is the dynamic price signal within a time window centred around time step $t$ and of size $k$, from which the demand response is induced.
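As a minimal illustration, the sketch below combines a time-of-day elasticity with a shift of the suppressed consumption towards the neighbouring hours of the window $Y'_t$. All coefficients and the uniform shifting rule are assumptions made for this sketch, not values or mechanisms taken from the cited literature.

import numpy as np

def demand_response(c_forecast, prices, base_price, elasticity, k=2):
    """Illustrative demand response model M (all coefficients assumed).

    c_forecast : consumption forecasts c^F_t before any demand response.
    prices     : dynamic prices y_t announced to the consumers.
    elasticity : time-of-day elasticity profile (24 negative values).
    """
    T = len(c_forecast)
    c_prime = np.asarray(c_forecast, dtype=float).copy()
    for t in range(T):
        rel_change = (prices[t] - base_price) / base_price
        delta = c_prime[t] * elasticity[t % 24] * rel_change  # own-price response
        c_prime[t] += delta
        # Shift the suppressed (or added) consumption to neighbouring hours.
        neighbours = [t + e for e in range(-k, k + 1)
                      if e != 0 and 0 <= t + e < T]
        if neighbours:
            for n in neighbours:
                c_prime[n] -= delta / len(neighbours)
    return c_prime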
As far as the scientific literature about the modelling of demand response to dynamic prices is concerned, this interesting topic has not yet received much attention from the research community. Still, there exist a few sound works presenting demand response models and assessing the receptiveness of consumers to dynamic power prices [15, 16, 17, 18, 19], as explained in Section 2.
4.5. Uncertainty discussion

As previously hinted, a dynamic pricing policy has to make its decisions based on imperfect information. Indeed, multiple forecasts for the electricity price, production and consumption have to be generated 12 up to 35 hours in advance. Naturally, these predictions come with a level of uncertainty that should not be neglected. Moreover, accurately modelling the residential demand response to dynamic prices is a particularly challenging task. Because of both the randomness of human nature and the difficulty of fully capturing the consumers' behaviour within a mathematical model, a notable level of uncertainty should also be considered at this stage. Therefore, multiple sources of uncertainty can be identified in the scope of the dynamic pricing decision-making problem at hand, and a proper management of this uncertainty is necessary.
A stochastic reasoning is recommended to make sound dynamic pricing decisions despite this substantial level of uncertainty. Instead of considering each uncertain variable (production, consumption, price, demand response) with a probability of 1, the full probability distribution behind these quantities has to be estimated and exploited. Based on this information, the risk associated with uncertainty may be mitigated. Moreover, safety margins may also contribute to reduce this risk, but potentially at the expense of a lowered performance. In fact, there generally exists a trade-off between performance and risk, in line with the adage: with great risk comes great reward.
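As a minimal illustration of this stochastic reasoning, the sketch below scores candidate prices over scenarios sampled from the estimated forecast distributions, and penalises the dispersion of outcomes as a simple risk measure. The scenario generator, the objective function and the risk weighting are assumed interfaces, not components specified by this formalisation.

import numpy as np

def pick_price(candidates, sample_scenario, objective,
               n_scenarios=1000, risk_aversion=0.5):
    """Choose a price by mean-minus-risk scoring over sampled scenarios."""
    best_price, best_score = None, -np.inf
    for y in candidates:
        # One scenario is a joint draw of production, consumption,
        # market price and demand response (assumed interface).
        values = np.array([objective(y, sample_scenario())
                           for _ in range(n_scenarios)])
        score = values.mean() - risk_aversion * values.std()
        if score > best_score:
            best_price, best_score = y, score
    return best_price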
5. Performance assessment methodology

This section presents a methodology for quantitatively assessing the performance of a dynamic pricing policy in a comprehensive manner. As explained in Section 3.7, several disjoint objectives can be clearly identified. For the sake of completeness, this research work presents three quantitative indicators, one for each objective. The relative importance of these indicators is left to the discretion of the reader, according to their main intention among the different objectives previously defined.
The performance indicators proposed are based on the comparison with the original situation without dynamic pricing. In this case, the consumer is assumed to be fully ignorant about the mismatch problem between supply and demand. No information is provided to the customers of the producer/retailer, who consequently have an uninfluenced consumption behaviour. The price of electricity $e_t$ is freely determined by the producer/retailer. It may for instance be a fixed tariff, or a price indexed on the day-ahead market price:

$e_t = \alpha \lambda_t + \beta$,    (18)

where $\alpha$ and $\beta$ are parameters to be set by the retailer.
Firstly, the impact of a dynamic pricing policy on the synchronisation between power supply and demand can be assessed through the performance indicator $S$, quantifying the relative evolution of the deviation $\Delta_T$. This quantity is mathematically expressed as follows:

$S = 100 \, \dfrac{\bar{\Delta}_T - \Delta_T}{\bar{\Delta}_T}$,    (19)
$\Delta_T = \sum_{t=0}^{T-1} |p_t - c_t|$,    (20)

where $\bar{\Delta}_T$ represents the lack of synchronisation between supply and demand without dynamic pricing. Therefore, the quantity $S$ ideally has to be maximised, with a perfect synchronisation between supply and demand leading to a value of 100% reduction in deviation.
Secondly, the consequence for the consumer regarding their electricity bill can be evaluated with the quantity $B$, which informs about the relative evolution of this power bill. It can be mathematically computed as follows:

$B = 100 \, \dfrac{\bar{B}_T - B_T}{\bar{B}_T}$,    (21)

where $\bar{B}_T = \sum_{t=0}^{T-1} c_t \, e_t$ represents the electricity bill paid by the consumer without dynamic pricing. Since the performance indicator $B$ represents the percentage reduction in costs, it ideally has to be maximised.
Lastly, the enhancement in terms of revenue for the producer/retailer can be efficiently quantified thanks to the performance indicator $R$. This quantity represents the relative evolution of the producer/retailer revenue and can be mathematically expressed as follows:

$R = 100 \, \dfrac{R_T - \bar{R}_T}{\bar{R}_T}$,    (22)
$R_T = \sum_{t=0}^{T-1} \left[ c_t \, e_t - (c_t^F - p_t^F) \, \lambda_t - (c_t - p_t) \, i_t \right]$,    (23)

where $\bar{R}_T$ represents the producer/retailer revenue without dynamic pricing. Obviously, the performance indicator $R$ ideally has to be maximised.
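Given hourly trajectories obtained with and without dynamic pricing, the three indicators follow directly from Equations (19) to (23), as in the sketch below. The dictionary-based interface is an assumption made for conciseness.

import numpy as np

def performance_indicators(with_dp, without_dp):
    """Compute S, B and R from two trajectories, each a dict of arrays
    with keys 'p', 'c', 'e', 'lam', 'i', 'c_f' and 'p_f' (assumed layout)."""

    def deviation(s):  # Delta_T, Eq. (20)
        return np.abs(s['p'] - s['c']).sum()

    def bill(s):       # B_T
        return (s['c'] * s['e']).sum()

    def revenue(s):    # R_T, Eq. (23)
        return (s['c'] * s['e']
                - (s['c_f'] - s['p_f']) * s['lam']
                - (s['c'] - s['p']) * s['i']).sum()

    S = 100 * (deviation(without_dp) - deviation(with_dp)) / deviation(without_dp)
    B = 100 * (bill(without_dp) - bill(with_dp)) / bill(without_dp)
    R = 100 * (revenue(with_dp) - revenue(without_dp)) / revenue(without_dp)
    return S, B, R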
6. Conclusion

This research paper presents a detailed formalisation of the decision-making problem faced by a producer/retailer willing to adopt a dynamic pricing approach, in order to induce an appropriate residential demand response. Three core challenges are highlighted by this formalisation work. Firstly, the objective criterion maximised by a dynamic pricing policy is not trivially defined, since different goals that are not compatible can be clearly identified. Secondly, several complex algorithmic components are necessary for the development of a well-performing dynamic pricing policy. One can for instance mention the different forecasting blocks, but also a mathematical model of the residential demand response to dynamic prices. Thirdly, the dynamic pricing decisions have to be made based on imperfect information, because this particular decision-making problem is highly conditioned by the actual uncertainty about the future.

Several avenues are proposed for future work. In fact, the natural extension of the present research is to design innovative dynamic pricing policies from the perspective of the supply side based on the formalisation performed. While the present research paper exclusively focuses on the philosophy and conceptual analysis of the approach, there remain practical concerns that need to be properly addressed in order to achieve well-performing decision-making policies. To achieve that, a deeper analysis of the scientific literature about each algorithmic component discussed in Section 4 is firstly required, in order to identify and reproduce the state-of-the-art techniques within the context of interest. Then, different approaches have to be investigated for the design of the dynamic pricing policy itself. One can for instance mention, among others, stochastic optimisation and deep reinforcement learning techniques. Finally, the dynamic pricing policies developed have to be rigorously evaluated, analysed, and compared by taking advantage of real-life experiments.
Acknowledgements

Thibaut Théate is a Research Fellow of the F.R.S.-FNRS, of which he acknowledges the financial support.
References

[1] IPCC, Climate Change 2021: The Physical Science Basis. Contribution of Working Group I to the Sixth Assessment Report of the Intergovernmental Panel on Climate Change, Cambridge University Press, Cambridge, United Kingdom and New York, NY, USA, 2021.
[2] H. Ritchie, M. Roser, Energy, Our World in Data, https://ourworldindata.org/energy (2020).
[3] S. Chatzivasileiadis, D. Ernst, G. Andersson, The global grid, Renewable Energy 57 (2013) 372-383.
[4] N. Kittner, F. Lill, D. M. Kammen, Energy storage deployment and innovation for the clean energy transition, Nature Energy 2 (2017) 17125.
[5] P. Palensky, D. Dietrich, Demand side management: Demand response, intelligent energy systems, and smart loads, IEEE Transactions on Industrial Informatics 7 (2011) 381-388.
[6] P. Siano, Demand response and smart grids: A survey, Renewable & Sustainable Energy Reviews 30 (2014) 461-478.
[7] R. Deng, Z. Yang, M.-Y. Chow, J. Chen, A survey on demand response in smart grids: Mathematical models and approaches, IEEE Transactions on Industrial Informatics 11 (2015) 570-582.
[8] J. S. Vardakas, N. Zorba, C. V. Verikoukis, A survey on demand response programs in smart grids: Pricing methods and optimization algorithms, IEEE Communications Surveys & Tutorials 17 (2015) 152-178.
[9] H. T. Haider, O. H. See, W. Elmenreich, A review of residential demand response of smart grid, Renewable & Sustainable Energy Reviews 59 (2016) 166-178.
[10] Z. Zhao, W. C. Lee, Y. Shin, K.-B. Song, An optimal power scheduling method for demand response in home energy management system, IEEE Transactions on Smart Grid 4 (2013) 1391-1400.
[11] M. Muratori, G. Rizzoni, Residential demand response: Dynamic energy management and time-varying electricity pricing, IEEE Transactions on Power Systems 31 (2016) 1108-1117.
[12] N. Liu, X. Yu, C. Wang, C. Li, L. Ma, J. Lei, Energy-sharing model with price-based demand response for microgrids of peer-to-peer prosumers, IEEE Transactions on Power Systems 32 (2017) 3569-3583.
[13] J. R. Vázquez-Canteli, Z. Nagy, Reinforcement learning for demand response: A review of algorithms and modeling techniques, Applied Energy (2019).
[14] H. Li, Z. Wan, H. He, Real-time residential demand response, IEEE Transactions on Smart Grid 11 (2020) 4144-4154.
[15] S. Gottwalt, W. Ketter, C. Block, J. Collins, C. Weinhardt, Demand side management: A simulation of household behavior under variable prices, Energy Policy 39 (2011) 8163-8174.
[16] M. Brégère, R. J. Bessa, Simulating tariff impact in electrical energy consumption profiles with conditional variational autoencoders, IEEE Access 8 (2020) 131949-131966.
[17] K. Ganesan, J. T. Saraiva, R. J. Bessa, Functional model of residential consumption elasticity under dynamic tariffs, Energy and Buildings 255 (2022).
[18] Y. He, B. Wang, J. Wang, J. Wang, W. Xiong, T. Xia, Residential demand response behavior analysis based on Monte Carlo simulation: The case of Yinchuan in China, Energy 47 (2012) 230-236.
[19] E. A. M. Klaassen, C. B. A. Kobus, J. Frunt, J. G. Slootweg, Responsiveness of residential electricity demand to dynamic tariffs: Experiences from a large field test in the Netherlands, Applied Energy 183 (2016) 1065-1074.
[20] C. Sweeney, R. J. Bessa, J. Browell, P. Pinson, The future of forecasting for renewable energy, WIREs Energy and Environment (2019).
[21] R. Ahmed, V. Sreeram, Y. D. Mishra, M. Arif, A review and evaluation of the state-of-the-art in PV solar power forecasting: Techniques and optimization, Renewable & Sustainable Energy Reviews 124 (2020) 109792.
[22] S. Aslam, H. Herodotou, S. M. Mohsin, N. Javaid, N. Ashraf, S. Aslam, A survey on deep learning methods for power load and renewable energy forecasting in smart microgrids, Renewable & Sustainable Energy Reviews 144 (2021).
[23] H. Jahangir, H. Tayarani, S. S. Gougheri, M. A. Golkar, A. Ahmadian, A. Elkamel, Deep learning-based forecasting approach in smart grids with microclustering and bidirectional LSTM network, IEEE Transactions on Industrial Electronics 68 (2021) 8298-8309.
[24] D. Heinemann, E. Lorenz, M. Girodo, Forecasting solar radiation, Journal of Cases on Information Technology (2021).
[25] W. Kong, Z. Y. Dong, Y. Jia, D. J. Hill, Y. Xu, Y. Zhang, Short-term residential load forecasting based on LSTM recurrent neural network, IEEE Transactions on Smart Grid 10 (2019) 841-851.
[26] N. Somu, G. R. M. R, K. Ramamritham, A hybrid model for building energy consumption forecasting using long short term memory networks, Applied Energy 261 (2020) 114131.
[27] X. bo Jin, W. Zheng, J. Kong, X. Wang, Y. Bai, T. Su, S. Lin, Deep-learning forecasting method for electric power load via attention-based encoder-decoder with bayesian optimization, Energies 14 (2021) 1596.
[28] A. Gasparin, S. Lukovic, C. Alippi, Deep learning for time series forecasting: The electric load case, CAAI Transactions on Intelligence Technology 7 (2021).
[29] R. Weron, Electricity price forecasting: A review of the state-of-the-art with a look into the future, HSC Research Reports (2014).
[30] J. Nowotarski, R. Weron, Recent advances in electricity price forecasting: A review of probabilistic forecasting, HSC Research Reports (2016).
[31] A. R. Gollou, N. Ghadimi, A new feature selection and hybrid forecast engine for day-ahead price forecasting of electricity markets, J. Intell. Fuzzy Syst. 32 (2017) 4031-4045.
[32] U. Ugurlu, I. Oksuz, O. Tas, Electricity price forecasting using recurrent neural networks, Energies 11 (2018) 1255.
[33] H. Jahangir, H. Tayarani, S. Baghali, A. Ahmadian, A. Elkamel, M. A. Golkar, M. Castilla, A novel electricity price forecasting approach based on dimension reduction strategy and rough artificial neural networks, IEEE Transactions on Industrial Informatics 16 (2020) 2369-2381.
7NFJT4oBgHgl3EQfmSym/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff

99AzT4oBgHgl3EQfSvs8/content/2301.01236v1.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e28b81343d1235599c70f9c9c3cbb836066928ec77806cddf0286ee1db2aad3d
+ size 235377

99AzT4oBgHgl3EQfSvs8/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3de2cd09d96d1a7073c349d938045573fcf743d4d73ef29f7c29e67369cac9f9
+ size 852013

99AzT4oBgHgl3EQfSvs8/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3fcc8c443c05193a07366f41e8a6a6e43dcc8017230bb01efdd9ff1761e18548
+ size 45291

9NE1T4oBgHgl3EQfCQIp/content/2301.02861v1.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ccd65ccbf6f85c08c77cb8c1d494182757a2ec78e707beba11a988acedf286ab
+ size 114673

9NE1T4oBgHgl3EQfCQIp/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0574c340b17a38c24d7245b6f990cee3caeee5aa2e487e5ffb793b9ef975eb4f
+ size 1507373

9NE1T4oBgHgl3EQfCQIp/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8c3fa6a4b842b7c08d2672b72d6039ec14afa3730a56f8fc0945b3965c770c7f
+ size 55233

B9E4T4oBgHgl3EQf5Q72/content/2301.05323v1.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:caf0dcededec6e9659b705392704534d7db404475dd4080c14388abe0806287d
+ size 13401543

BNE1T4oBgHgl3EQfVgSF/content/2301.03103v1.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2dddc73af5082d97cfe9065c89d554d9247ecaeb42c6ad9f1ebd5eab411f5323
+ size 2237861

BNE1T4oBgHgl3EQfVgSF/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eae9c18cdd72effe082f552444c6dd1dc00cf95ed94baf6a6a0baff6f85029d0
+ size 6815789

CdE2T4oBgHgl3EQfSAcB/content/tmp_files/2301.03786v1.pdf.txt ADDED
@@ -0,0 +1,1015 @@
DiffTalk: Crafting Diffusion Models for Generalized Talking Head Synthesis

Shuai Shen¹, Wenliang Zhao¹, Zibin Meng¹, Wanhua Li¹, Zheng Zhu², Jie Zhou¹, Jiwen Lu¹
¹Tsinghua University  ²PhiGent Robotics

Figure 1. We present a crafted conditional Diffusion model for generalized Talking head synthesis (DiffTalk). Given a driven audio, the DiffTalk is capable of synthesizing high-fidelity and synchronized talking videos for multiple identities without further fine-tuning.
Abstract

Talking head synthesis is a promising approach for the video production industry. Recently, a lot of effort has been devoted to this research area to improve the generation quality or enhance the model generalization. However, there are few works able to address both issues simultaneously, which is essential for practical applications. To this end, in this paper, we turn attention to the emerging powerful Latent Diffusion Models, and model the talking head generation as an audio-driven temporally coherent denoising process (DiffTalk). More specifically, instead of employing audio signals as the single driving factor, we investigate the control mechanism of the talking face, and incorporate reference face images and landmarks as conditions for personality-aware generalized synthesis. In this way, the proposed DiffTalk is capable of producing high-quality talking head videos in synchronization with the source audio, and more importantly, it can be naturally generalized across different identities without any further fine-tuning. Additionally, our DiffTalk can be gracefully tailored for higher-resolution synthesis with negligible extra computational cost. Extensive experiments show that the proposed DiffTalk efficiently synthesizes high-fidelity audio-driven talking head videos for generalized novel identities. For more video results, please refer to this demonstration: https://cloud.tsinghua.edu.cn/f/e13f5aad2f4c4f898ae7/.
1. Introduction

Talking head synthesis is a challenging and promising research topic, which aims to synthesize a talking video with given audio. This technique is widely applied in various practical scenarios including animation, virtual avatars, online education, and video conferencing [4, 44, 47, 50, 52].

Recently, a lot of effort has been devoted to this research area to improve the generation quality or enhance the model generalization. Among the existing mainstream talking head generation approaches, 2D-based methods usually depend on generative adversarial networks (GANs) [6, 10, 16, 22, 28] for audio-to-lip mapping, and most of them perform competently on model generalization. However, since GANs need to simultaneously optimize a generator and a discriminator, the training process lacks stability and is prone to mode collapse [11]. Due to this restriction, the generated talking videos are of limited image quality, and difficult to scale to higher resolutions. By contrast, 3D-based methods [2, 17, 42, 46, 53] perform better in synthesizing higher-quality talking videos. However, they highly rely on identity-specific training, and thus cannot generalize across different persons. Such identity-specific training also brings heavy resource consumption and is not friendly to practical applications. Most recently, some 3D-based works [36] have taken a step towards improving the generalization of the model; however, further fine-tuning on specific identities is still inevitable.

Generation quality and model generalization are two essential factors for better deployment of the talking head synthesis technique to real-world applications. However, few existing works are able to address both issues well. In this paper, we propose a crafted conditional Diffusion model for generalized Talking head synthesis (DiffTalk) that aims to tackle these two challenges simultaneously. Specifically, to avoid the unstable training of GANs, we turn attention to the recently developed generative technology of Latent Diffusion Models [30], and model the talking head synthesis as an audio-driven temporally coherent denoising process. On this basis, instead of utilizing audio signals as the single driving factor to learn the audio-to-lip translation, we further incorporate reference face images and landmarks as supplementary conditions to guide the face identity and head pose for personality-aware video synthesis. Under these designs, the talking head generation process is more controllable, which enables the learned model to naturally generalize across different identities without further fine-tuning. As shown in Figure 1, with a sequence of driven audio, our DiffTalk is capable of producing natural talking videos of different identities based on the corresponding reference videos. Moreover, benefiting from the latent space learning mode, our DiffTalk can be gracefully tailored for higher-resolution synthesis with negligible extra computational cost, which is meaningful for improving the generation quality.

Extensive experiments show that our DiffTalk can synthesize high-fidelity talking videos for novel identities without any further fine-tuning. Figure 1 shows the generated talking sequences with one driven audio across three different identities. Comprehensive method comparisons show the superiority of the proposed DiffTalk, which provides a strong baseline for high-performance talking head synthesis. To summarize, we make the following contributions:

• We propose a crafted conditional diffusion model for high-quality and generalized talking head synthesis. By introducing smooth audio signals as a condition, we model the generation as an audio-driven temporally coherent denoising process.
• For personality-aware generalized synthesis, we further incorporate dual reference images as conditions. In this way, the trained model can be generalized across different identities without further fine-tuning.
• The proposed DiffTalk can generate high-fidelity and vivid talking videos for generalized identities. In experiments, our DiffTalk significantly outperforms 2D-based methods in the generated image quality, while surpassing 3D-based works in the model generalization ability.
2. Related Work

Audio-driven Talking Head Synthesis. Talking head synthesis aims to generate talking videos with lip movements synchronized with the driving audio [14, 40]. In terms of the modeling approach, we roughly divide the existing methods into 2D-based and 3D-based ones. In the 2D-based methods, GANs [6, 10, 16, 28] are usually employed as the core technology for learning the audio-to-lip translation. Zhou et al. [52] introduce a speaker-aware audio encoder for personalized head motion modeling. Prajwal et al. [28] boost the lip-visual synchronization with a well-trained Lip-Sync expert [8]. However, since the training process of GANs lacks stability and is prone to mode collapse [11], the generated talking videos are always of limited image quality, and difficult to scale to higher resolutions. Recently, a series of 3D-based methods [4, 20, 39-41] have been developed. [39-41] utilize 3D Morphable Models [2] for parametric control of the talking face. More recently, the emerging Neural Radiance Fields [26] provide a new solution for 3D-aware talking head synthesis [3, 17, 24, 36]. However, most of these 3D-based works highly rely on identity-specific training, and thus cannot generalize across different identities. Shen et al. [36] have tried to improve the generalization of the model; however, further fine-tuning on specific identities is still inevitable. In this work, we propose a brand-new diffusion model-based framework for high-fidelity and generalized talking head synthesis.

Latent Diffusion Models. Diffusion Probabilistic Models (DM) [37] have shown strong ability in various image generation tasks [11, 19, 29]. However, due to pixel space-based training [30, 32], very high computational costs are inevitable. More recently, Rombach et al. [30] propose the Latent Diffusion Models (LDMs), and transfer the training and inference processes of DM to a compressed lower-dimension latent space for more efficient computing [13, 49]. With the democratizing of this technology, it has been successfully employed in a series of works, including text-to-image translation [21, 31, 33], super resolution [7, 12, 27], image inpainting [23, 25], motion generation [35, 48], and 3D-aware prediction [1, 34, 43]. In this work, drawing on these successful practices, we model the talking head synthesis as an audio-driven temporally coherent denoising process and achieve superior generation results.
Figure 2. Overview of the proposed DiffTalk for generalized talking head video synthesis. Apart from the audio signal condition to drive the lip motions, we further incorporate reference images and facial landmarks as extra driving factors for personalized facial modeling. In this way, the talking head generation process is more controllable, which enables the learned model to generalize across different identities without further fine-tuning. Furthermore, benefiting from the latent space learning mode, we can gracefully improve our DiffTalk for higher-resolution synthesis with slight extra computational cost.
3. Methodology

3.1. Overview

To tackle the challenges of generation quality and model generalization for better real-world deployment, we model the talking head synthesis as an audio-driven temporally coherent denoising process, and term the proposed method DiffTalk. An overview of the proposed DiffTalk is shown in Figure 2. By introducing smooth audio features as a condition, we improve the diffusion model for temporally coherent facial motion modeling. For further personalized facial modeling, we incorporate reference face images and facial landmarks as extra driving factors. In this way, the talking head generation process is more controllable, which enables the learned model to generalize across different identities without any further fine-tuning. Moreover, benefiting from the latent space learning mode, we can gracefully improve our DiffTalk for higher-resolution synthesis with negligible extra computational cost, which contributes to improving the generation quality. In the following, we detail the proposed conditional Diffusion Model for high-fidelity and generalized talking head generation in Section 3.2. In Section 3.3, the progressive inference stage is introduced for better inter-frame consistency.
3.2. Conditional Diffusion Model for Talking Head

The emergence of Latent Diffusion Models (LDMs) [19, 30] provides a straightforward and effective way for high-fidelity image synthesis. To inherit its excellent properties, we adopt this advanced technology as the foundation of our method and explore its potential in modeling the dynamic talking head. With a pair of well-trained image encoder $E_I$ and decoder $D_I$ which are frozen in training [13], the input face image $x \in \mathbb{R}^{H \times W \times 3}$ can be encoded into a latent space $z_0 = E_I(x) \in \mathbb{R}^{h \times w \times 3}$, where $H/h = W/w = f$, $H, W$ are the height and width of the original image and $f$ is the downsampling factor. In this way, the learning is transferred to a lower-dimensional latent space, which is more efficient with fewer training resources. On this basis, the standard LDMs are modeled as a time-conditional UNet-based [32] denoising network $M$, which learns the reverse process of a Markov Chain [15] of length $T$. The corresponding objective can be formulated as:

$\mathcal{L}_{LDM} := \mathbb{E}_{z, \epsilon \sim \mathcal{N}(0,1), t}\left[ \|\epsilon - M(z_t, t)\|_2^2 \right]$,    (1)

where $t \in [1, \cdots, T]$ and $z_t$ is obtained through the forward diffusion process from $z_0$. $\tilde{z}_{t-1} = z_t - M(z_t, t)$ is the denoising result of $z_t$ at time step $t$. The final denoised result $\tilde{z}_0$ is then upsampled to the pixel space with the pre-trained image decoder $\tilde{x} = D_I(\tilde{z}_0)$, where $\tilde{x} \in \mathbb{R}^{H \times W \times 3}$ is the reconstructed face image.
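A minimal training step for this latent denoising objective might look as follows, assuming a frozen encoder $E_I$, a denoising network $M$ and a standard forward-diffusion noise schedule. The interfaces are placeholders for illustration, not the authors' released implementation.

import torch
import torch.nn.functional as F

def training_step(encoder, denoiser, x, alphas_cumprod, T=200):
    """One optimisation step of Eq. (1): predict the noise added to z_0."""
    with torch.no_grad():
        z0 = encoder(x)                     # frozen E_I: image -> latent z_0
    t = torch.randint(0, T, (z0.shape[0],), device=z0.device)
    eps = torch.randn_like(z0)
    a_bar = alphas_cumprod[t].view(-1, 1, 1, 1)
    z_t = a_bar.sqrt() * z0 + (1 - a_bar).sqrt() * eps   # forward diffusion
    eps_pred = denoiser(z_t, t)             # M(z_t, t)
    return F.mse_loss(eps_pred, eps)        # ||eps - M(z_t, t)||_2^2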
Given a source identity and driven audio, our goal is to train a model for generating a natural target talking video in synchronization with the audio condition while maintaining the original identity information. Furthermore, the trained model also needs to work for novel identities during inference. To this end, the audio signal is introduced as a basic condition to guide the direction of the denoising process for modeling the audio-to-lip translation.

Figure 3. Visualization of the smooth audio feature extractor. For better temporal coherence, two-stage smoothing operations are involved in this module.
Smooth Audio Feature Extraction. To better incorporate temporal information, we involve two-stage smoothing operations in the audio encoder $E_A$, as shown in Figure 3. Firstly, following the practice in VOCA [9], we reorganize the raw audio signal into overlapped windows of size 16 time intervals (corresponding to audio clips of 20ms), where each window is centered on the corresponding video frame. A pre-trained RNN-based DeepSpeech [18] module is then leveraged to extract the per-frame audio feature map $F$. For better inter-frame consistency, we further introduce a learnable temporal filtering [41]. It receives a sequence of adjacent audio features $[F_{i-w}, \ldots, F_i, \ldots, F_{i+w}]$ with $w = 8$ as input, and computes the final smoothed audio feature for the $i$-th frame as $a \in \mathbb{R}^{D_A}$ in a self-attention-based learning manner, where $D_A$ denotes the audio feature dimension. By encoding the audio information, we bridge the modality gap between the audio signals and the visual information. Introducing such smooth audio features as a condition, we extend the diffusion model for temporal coherence-aware modeling of face dynamics when talking. The objective is then formulated as:

$\mathcal{L}_A := \mathbb{E}_{z, \epsilon \sim \mathcal{N}(0,1), a, t}\left[ \|\epsilon - M(z_t, t, a)\|_2^2 \right]$.    (2)
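The second smoothing stage could be sketched as a small self-attention module in which the centre frame attends over the window of 2w + 1 adjacent DeepSpeech features, as below. The per-frame feature dimension and module details are assumptions of this sketch.

import torch
import torch.nn as nn

class TemporalFilter(nn.Module):
    """Illustrative learnable temporal filtering over adjacent audio features."""

    def __init__(self, feat_dim=29, out_dim=64, window=17):  # w = 8 -> 17 frames
        super().__init__()
        self.attn = nn.MultiheadAttention(feat_dim, num_heads=1, batch_first=True)
        self.proj = nn.Linear(feat_dim, out_dim)             # D_A = 64

    def forward(self, feats):
        # feats: (batch, 2w + 1, feat_dim), the window [F_{i-w}, ..., F_{i+w}]
        mid = feats.shape[1] // 2
        center = feats[:, mid:mid + 1]                 # query: the i-th frame
        smoothed, _ = self.attn(center, feats, feats)  # attend over the window
        return self.proj(smoothed.squeeze(1))          # a in R^{D_A}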
Identity-Preserving Model Generalization. In addition to learning the audio-to-lip translation, another essential task is to realize the model generalization while preserving complete identity information in the source image. Generalized identity information includes face appearance, head pose, and image background. To this end, a reference mechanism is designed to empower our model to generalize to new individuals unseen in training, as shown in Figure 2. Specifically, a random face image $x_r$ of the source identity is chosen as a reference condition, which contains appearance and background information. To prevent training shortcuts, we limit the selection of $x_r$ to 60 frames beyond the target image. However, since the ground-truth face image has a completely different pose from $x_r$, the model is expected to transfer the pose of $x_r$ to the target face without any prior information. This is somewhat an ill-posed problem with no unique solution. For this reason, we further incorporate the masked ground-truth image $x_m$ as another reference condition to provide the target head pose guidance. The mouth region of $x_m$ is completely masked to ensure that the ground-truth lip movements are not visible to the network. In this way, the reference $x_r$ focuses on affording mouth appearance information, which additionally reduces the training difficulty. Before serving as conditions, $x_r$ and $x_m$ are also encoded into the latent space through the trained image encoder, and we have $z_r = E_I(x_r) \in \mathbb{R}^{h \times w \times 3}$, $z_m = E_I(x_m) \in \mathbb{R}^{h \times w \times 3}$. On this basis, an auxiliary facial landmarks condition is also included for better control of the face outline. Similarly, landmarks in the mouth area are masked to avoid shortcuts. The landmark feature $l \in \mathbb{R}^{D_L}$ is obtained with an MLP-based encoder $E_L$, where $D_L$ is the landmark feature dimension. In this way, combining these conditions with the audio feature $a$, we realize precise control over all key elements of a dynamic talking face. With $C = \{a, z_r, z_m, l\}$ denoting the condition set, the talking head synthesis is finally modeled as a conditional denoising process optimized with the following objective:

$\mathcal{L} := \mathbb{E}_{z, \epsilon \sim \mathcal{N}(0,1), C, t}\left[ \|\epsilon - M(z_t, t, C)\|_2^2 \right]$,    (3)

where the network parameters of $M$, $E_A$ and $E_L$ are jointly optimized via this equation.
Conditioning Mechanisms. Based on the modeling of the conditional denoising process in Eq. 3, we pass these conditions $C$ to the network in the manner shown in Figure 2. Specifically, following [30], we implement the UNet-based backbone $M$ with the cross-attention mechanism for better multimodality learning. The spatially aligned references $z_r$ and $z_m$ are concatenated channel-wise with the noisy map $z_T$ to produce a joint visual condition $C_v = [z_T; z_m; z_r] \in \mathbb{R}^{h \times w \times 9}$. $C_v$ is fed to the first layer of the network to directly guide the output face in an image-to-image translation fashion. Additionally, the driven-audio feature $a$ and the landmark representation $l$ are concatenated into a latent condition $C_l = [a; l] \in \mathbb{R}^{D_A + D_L}$, which serves as the key and value for the intermediate cross-attention layers of $M$. To this extent, all condition information $C = \{C_v, C_l\}$ is properly integrated into the denoising network $M$ to guide the talking head generation process.
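The assembly of these conditions can be sketched as follows; the tensor shapes follow the paper, while the signature of the denoiser is an assumed placeholder.

import torch

def denoise_step(denoiser, z_t, z_m, z_r, a, l, t):
    """Assemble C_v and C_l and run one evaluation of the denoiser M.

    z_t, z_m, z_r: (B, 3, h, w) latents; a: (B, D_A); l: (B, D_L).
    """
    c_v = torch.cat([z_t, z_m, z_r], dim=1)  # (B, 9, h, w), first-layer input
    c_l = torch.cat([a, l], dim=-1)          # (B, D_A + D_L), cross-attn key/value
    return denoiser(c_v, t, context=c_l)     # predicted noise (assumed signature)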
Figure 4. Illustration of the designed progressive inference strategy. For the first frame, the setting of the visual condition $C_v$ remains the same as for training, where $x_{r,1}$ is a random face image from the target identity. Subsequently, the synthetic image $\tilde{x}_i$ is employed as the reference condition $x_{r,i+1}$ for the next frame to enhance the temporal coherence of the generated video.
Higher-Resolution Talking Head Synthesis. Our proposed DiffTalk can also be gracefully extended for higher-resolution talking head synthesis with negligible extra computational cost and faithful reconstruction effects. Specifically, considering the trade-off between the perceptual loss and the compression rate, for training images of size 256 × 256 × 3, we set the downsampling factor as f = 4 and obtain a latent space of 64 × 64 × 3. Furthermore, for higher-resolution generation of 512 × 512 × 3, we just need to adjust the paired image encoder $E_I$ and decoder $D_I$ with a bigger downsampling factor f = 8. The trained encoder is then frozen and employed to transfer the training process to a 64 × 64 × 3 latent space as well. This helps to relieve the pressure of insufficient resources, and therefore our model can be gracefully improved for higher-resolution talking head video synthesis.
3.3. Progressive Inference

We perform inference with Denoising Diffusion Implicit Model-based (DDIM) [38] iterative denoising steps. DDIM is a variant of the standard DM that accelerates sampling for more efficient synthesis. To further boost the coherence of the generated talking videos, we develop a progressive reference strategy in the inference process, as shown in Figure 4. Specifically, when rendering a talking video sequence with the trained model, for the first frame, the setting of the visual condition $C_v$ remains the same as for training, where $x_{r,1}$ is a random face image from the target identity. Subsequently, this synthetic face image is exploited as the $x_r$ for the next frame. In this way, image details between adjacent frames remain consistent, resulting in a smoother transition between frames. It is worth noting that this strategy is not used for training. Since the difference between adjacent frames is small, we need to eliminate such references during training to avoid learning shortcuts.
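The progressive strategy can be sketched as a simple feedback loop in which each synthesized frame becomes the reference for the next one. The encoder, DDIM sampler and decoder below are assumed placeholders for the trained components.

def progressive_inference(audio_feats, landmarks, masked_targets, x_r0,
                          encode, ddim_sample, decode):
    """Render a talking video with the progressive reference strategy."""
    outputs, reference = [], x_r0            # x_r0: random face of the identity
    for a, l, x_m in zip(audio_feats, landmarks, masked_targets):
        z_r, z_m = encode(reference), encode(x_m)
        z0 = ddim_sample(conditions=(a, l, z_r, z_m))  # latent-space denoising
        frame = decode(z0)
        outputs.append(frame)
        reference = frame                    # feed back as the next reference
    return outputs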
Figure 5. Ablation study on the audio smoothing operation. We show the differences between adjacent frames as heatmaps for better visualization. The results without audio filtering present obvious high heat values in the mouth region, which indicates the jitters in this area. By contrast, with smooth audio as the condition, the generated video frames show smoother transitions.
4. Experiments

4.1. Experimental Settings

Dataset. To train the audio-driven diffusion model, the audio-visual dataset HDTF [51] is used. It contains 16 hours of talking videos in 720P or 1080P from more than 300 identities. We randomly select 100 videos with a total length of about 5 hours for training, while the remaining data serve as the test set. Apart from this public dataset, we also use some other videos for cross-dataset evaluation.

Metric. We evaluate our proposed method through visual results coupled with quantitative indicators. PSNR (↑), SSIM (↑) [45] and LPIPS (↓) [49] are three metrics for assessing image quality. LPIPS is a learning-based perceptual similarity measure that is more in line with human perception; we therefore recommend this metric as a more objective indicator. The SyncNet score (Offset↓ / Confidence↑) [8] checks the audio-visual synchronization quality, which is important for the audio-driven talking head generation task. ('↓' indicates that lower is better, while '↑' means that higher is better.)
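Of these metrics, PSNR is the simplest to reproduce from scratch, as in the sketch below; SSIM, LPIPS and SyncNet require their respective reference implementations.

import numpy as np

def psnr(img_a, img_b, max_val=255.0):
    """Peak signal-to-noise ratio between two 8-bit images, in dB."""
    mse = np.mean((img_a.astype(np.float64) - img_b.astype(np.float64)) ** 2)
    if mse == 0:
        return float('inf')
    return 10.0 * np.log10(max_val ** 2 / mse)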
Implementation Details. We resize the input image to 256 × 256 for experiments. The downsampling factor f is set as 4, so the latent space is 64 × 64 × 3. For training the model for higher-resolution synthesis, the input is resized to 512 × 512 with f = 8 to keep the same size of latent space. The length of the denoising step T is set as 200 for both the training and inference processes. The feature dimensions are $D_A = D_L = 64$. Our model takes about 15 hours to train on 8 NVIDIA 3090Ti GPUs.
Figure 6. Ablation study on the design of the conditions. The marks above these images refer to the following meanings, 'A': Audio; 'L': Landmark; 'R': Random reference image; 'M': Masked ground-truth image. We show the generated results under different condition settings on two test sets, and demonstrate the effectiveness of our final design, i.e. A+L+M+R.
Method | PSNR↑ | SSIM↑ | LPIPS↓ | SyncNet↓↑
Test Set A
GT   |   -   |   -   |   -    | 0/9.610
w/o  | 33.67 | 0.944 | 0.024  | 1/5.484
w    | 34.17 | 0.946 | 0.024  | 1/6.287
Test Set B
GT   |   -   |   -   |   -    | 0/9.553
w/o  | 32.70 | 0.924 | 0.031  | 1/5.197
w    | 32.73 | 0.925 | 0.031  | 1/5.387

Table 1. Ablation study to investigate the contribution of the audio smoothing operation. 'w' indicates the model is trained with the audio features after temporal filtering and vice versa.
4.2. Ablation Study

Effect of the Smooth Audio. In this subsection, we investigate the effect of the audio smoothing operations. Quantitative results in Table 1 show that the model equipped with the audio temporal filtering module outperforms the one without smooth audio, especially in the SyncNet score. We further visualize the differences between adjacent frames as the heatmaps shown in Figure 5. The results without audio filtering present obvious high heat values in the mouth region, which indicates the jitters in this area. By contrast, with smooth audio as the condition, the generated video frames show smoother transitions, which are reflected in the soft differences of adjacent frames.
Design of the Conditions. A major contribution of this work is the careful design of the conditions for generalized and high-fidelity talking head synthesis. In Figure 6, we show the generated results under different condition settings step by step, to demonstrate the superiority of our design.
Method | PSNR↑ | SSIM↑ | LPIPS↓ | SyncNet↓↑
Test Set A
GT   |   -   |   -   |   -    | 4/7.762
w/o  | 34.17 | 0.946 | 0.024  | 1/6.287
w    | 33.95 | 0.946 | 0.023  | -1/6.662
Test Set B
GT   |   -   |   -   |   -    | 3/8.947
w/o  | 32.73 | 0.925 | 0.031  | 1/5.387
w    | 33.02 | 0.925 | 0.030  | 1/5.999

Table 2. Ablation study on the effect of the progressive inference strategy. 'w/o' indicates that a random reference image is employed as the condition, and 'w' means that the reference is the generated result of the previous frame.
With pure audio as the condition, the model fails to generalize to new identities, and the faces are not aligned with the background in the inpainting-based inference. Adding landmarks as another condition tackles the misalignment problem. A random reference image is further introduced, trying to provide the identity information. However, since the ground-truth face image has a different pose from this random reference, the model is expected to transfer the pose of the reference to the target face. This greatly increases the difficulty of training, leading to hard network convergence, and the identity information is not well learned. Using the audio and masked ground-truth images as driving factors mitigates the identity inconsistency and misalignment issues; however, the appearance of the mouth cannot be learned since this information is not visible to the network. For this reason, we employ the random reference face and the masked ground-truth image together for dual driving, where the random reference provides the lip appearance information and the masked ground truth controls the head pose and identity. Facial landmarks are also incorporated as a condition that helps to model the facial contour better. Results in Figure 6 show the effectiveness of such a design in synthesizing realistic and controllable face images.

Figure 7. Visual comparison with some representative 2D-based talking head generation methods ATVGnet [5], MakeitTalk [52] and Wav2Lip [28], and with some recent 3D-based ones AD-NeRF [17] and DFRF [36]. The results of DFRF are synthesized with the base model without fine-tuning for fair comparisons. AD-NeRF is trained on these two identities respectively to produce the results.
Impact of the Progressive Inference. Temporal correlation inference is realized in this work through the progressive reference strategy. We conduct an ablation study in Table 2 to investigate the impact of this design. 'w/o' indicates that a random reference image x_r is employed, and 'w' means that the generated result of the previous frame is chosen as the reference condition. With such progressive inference, the SyncNet scores are further boosted, since the temporal correlation is better modeled and the talking style becomes more coherent. The LPIPS score also improves with this design. PSNR tends to give higher scores to blurry images [49], so we recommend LPIPS as the more representative metric for visual quality.
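A minimal sketch of this progressive strategy is given below; model.sample stands in for one full denoising pass per frame and is a placeholder name, not an API from the paper:

def progressive_inference(model, audio_windows, init_reference):
    # Synthesize a clip frame by frame, feeding each generated frame
    # back in as the reference condition for the next one (the 'w'
    # setting in Table 2).
    frames, reference = [], init_reference
    for audio in audio_windows:  # one smoothed audio window per frame
        frame = model.sample(audio=audio, reference=reference)
        frames.append(frame)
        reference = frame  # progressive reference improves temporal coherence
    return frames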
4.3. Method Comparison

Comparison with 2D-based Methods. In this section, we compare our approach with some representative 2D-based talking head generation methods, including ATVGnet [5], MakeItTalk [52] and Wav2Lip [28]. Figure 7 visualizes the frames generated by these methods. It can be seen that ATVGnet performs generation on cropped faces, with limited image quality. MakeItTalk synthesizes plausible talking frames; however, the background is wrongly warped along with the mouth movements. This phenomenon is more noticeable in the video results and greatly degrades the visual experience. The talking faces generated by Wav2Lip show artifacts within the square boundary centered on the mouth, since the synthesized area and the original image are not well blended.
                   ---------- Test Set A ----------    ---------- Test Set B ----------
Method             PSNR↑  SSIM↑  LPIPS↓  SyncNet↓↑     PSNR↑  SSIM↑  LPIPS↓  SyncNet↓↑    General Method
GT                   -      -      -     -1/8.979        -      -      -     -2/7.924          -
MakeItTalk [52]    18.77  0.544  0.19    -4/3.936      17.70  0.648  0.129   -3/3.416          ✓
Wav2Lip [28]       25.50  0.761  0.140   -2/8.936      33.38  0.942  0.027   -3/9.385          ✓
AD-NeRF [17]       27.89  0.885  0.072   -2/5.639      30.14  0.947  0.023   -3/4.246          ✗
DFRF [36]          28.60  0.892  0.068   -1/5.999      33.57  0.949  0.025   -2/4.432       FT Req.
Ours               34.54  0.950  0.024   -1/6.381      34.01  0.950  0.020   -1/5.639          ✓

Table 3. Comparison with some representative talking head synthesis methods on the two test sets shown in Figure 7. The best performance is highlighted in red (1st best) and blue (2nd best). Our DiffTalk obtains the best PSNR, SSIM, and LPIPS values, and comparable SyncNet scores. The 'General Method' column marks whether a method generalizes to unseen identities without fine-tuning. Note that DFRF is fine-tuned on the specific identity to obtain these results, while our method is used for generation directly, without further fine-tuning ('FT Req.' means that a fine-tuning operation is required for DFRF).
By contrast, the proposed DiffTalk generates natural and realistic talking videos with accurate audio-lip synchronization, owing to the crafted conditioning mechanism and the stable training process. For more objective comparisons, we further evaluate the quantitative results in Table 3. Our DiffTalk far surpasses [28] and [52] on all image quality metrics. On the audio-visual synchronization metric SyncNet, the proposed method reaches a high level and is superior to MakeItTalk. Although DiffTalk is slightly inferior to Wav2Lip on the SyncNet score, it is far better in terms of image quality. In conclusion, our method outperforms these 2D-based methods under a comprehensive consideration of the qualitative and quantitative results.
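For completeness, the per-frame image-quality metrics in Table 3 can be computed along the following lines. This is a sketch using scikit-image and the lpips package [49]; the paper's exact evaluation protocol (crops, averaging, SyncNet setup) is not reproduced here:

import lpips  # perceptual metric of [49]; pip install lpips
import torch
from skimage.metrics import peak_signal_noise_ratio, structural_similarity

lpips_fn = lpips.LPIPS(net="alex")

def frame_metrics(pred, gt):
    # pred, gt: (H, W, 3) float arrays in [0, 1].
    psnr = peak_signal_noise_ratio(gt, pred, data_range=1.0)
    ssim = structural_similarity(gt, pred, channel_axis=-1, data_range=1.0)
    # lpips expects (N, 3, H, W) tensors scaled to [-1, 1].
    to_tensor = lambda x: torch.from_numpy(x).permute(2, 0, 1)[None].float() * 2 - 1
    lp = lpips_fn(to_tensor(pred), to_tensor(gt)).item()
    return psnr, ssim, lp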
Comparison with 3D-based Methods. For a more comprehensive evaluation, we further compare with some recent high-performance 3D-based works, including AD-NeRF [17] and DFRF [36]. They realize implicit 3D head modeling with NeRF technology, so we treat them as generalized 3D-based methods. The visual results are shown in Figure 7. AD-NeRF models the head and torso parts separately, resulting in misalignment in the neck region. More importantly, AD-NeRF is a non-general method, whereas our method is able to handle unseen identities without further fine-tuning, which is more in line with practical application scenarios. DFRF relies heavily on a fine-tuning operation for model generalization, and the talking faces generated with only its base model are far from satisfactory, as shown in Figure 7. The quantitative results in Table 3 further show that our method surpasses [17, 36] on the image quality and audio-visual synchronization indicators.
4.4. Expand to Higher Resolution

In this section, we perform experiments to demonstrate the capacity of our method for generating higher-resolution images. In Figure 8, we show the synthesized frames of two models, (a) and (b). Model (a) is trained on 256 × 256 images with downsampling factor f = 4, so the latent space is of size 64 × 64 × 3. For model (b), 512 × 512 images with f = 8 are used for training. Since both models are trained on a compressed 64 × 64 × 3 latent space, the pressure on computing resources is relieved. We can therefore comfortably expand our model to higher-resolution generation, as shown in Figure 8, where the synthesis quality of (b) significantly outperforms that of (a).

[Figure 8. Generated results at higher resolution: (a) 256 × 256, f = 4; (b) 512 × 512, f = 8.]
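The arithmetic behind this observation is simple: the denoising network always operates on a resolution//f latent, so doubling the image size while doubling f keeps the compute essentially unchanged. A quick sanity check (our own illustration):

def latent_hw(resolution, f):
    # Spatial side length of the LDM latent for a square image.
    return resolution // f

assert latent_hw(256, 4) == 64  # model (a): 64 x 64 x 3 latent
assert latent_hw(512, 8) == 64  # model (b): same 64 x 64 x 3 latent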
5. Conclusion and Discussion

In this paper, we have proposed a generalized and high-fidelity talking head synthesis method based on a crafted conditional diffusion model. Apart from the audio signal condition that drives the lip motions, we further incorporate reference images as driving factors to model the personalized appearance, which enables the learned model to comfortably generalize across different identities without any further fine-tuning. Furthermore, the proposed DiffTalk can be gracefully tailored for higher-resolution synthesis with negligible extra computational cost.
Limitations. The proposed method models talking head generation as an iterative denoising process, which needs more time to synthesize a frame than most GAN-based approaches. This is a common problem of LDM-based works and warrants further research. Nonetheless, we hold a large speed advantage over most 3D-based methods. Since talking head technology may raise potential misuse issues, we are committed to combating such malicious behaviors and advocate positive applications. Additionally, researchers who want to use our code will be required to obtain authorization and to add watermarks to the generated videos.
References
[1] Miguel Angel Bautista, Pengsheng Guo, Samira Abnar, Walter Talbott, Alexander Toshev, Zhuoyuan Chen, Laurent Dinh, Shuangfei Zhai, Hanlin Goh, Daniel Ulbricht, et al. GAUDI: A neural architect for immersive 3d scene generation. arXiv, 2022.
[2] Volker Blanz and Thomas Vetter. A morphable model for the synthesis of 3d faces. In SIGGRAPH, 1999.
[3] Eric R Chan, Marco Monteiro, Petr Kellnhofer, Jiajun Wu, and Gordon Wetzstein. pi-GAN: Periodic implicit generative adversarial networks for 3d-aware image synthesis. In CVPR, 2021.
[4] Lele Chen, Guofeng Cui, Celong Liu, Zhong Li, Ziyi Kou, Yi Xu, and Chenliang Xu. Talking-head generation with rhythmic head motion. In ECCV, 2020.
[5] Lele Chen, Ross K Maddox, Zhiyao Duan, and Chenliang Xu. Hierarchical cross-modal talking face generation with dynamic pixel-wise loss. In CVPR, 2019.
[6] Michail Christos Doukas, Stefanos Zafeiriou, and Viktoriia Sharmanska. HeadGAN: Video-and-audio-driven talking head synthesis. arXiv, 2020.
[7] Hyungjin Chung, Jeongsol Kim, Michael T McCann, Marc L Klasky, and Jong Chul Ye. Diffusion posterior sampling for general noisy inverse problems. arXiv, 2022.
[8] Joon Son Chung and Andrew Zisserman. Out of time: automated lip sync in the wild. In ACCV, 2016.
[9] Daniel Cudeiro, Timo Bolkart, Cassidy Laidlaw, Anurag Ranjan, and Michael J Black. Capture, learning, and synthesis of 3d speaking styles. In CVPR, 2019.
[10] Dipanjan Das, Sandika Biswas, Sanjana Sinha, and Brojeshwar Bhowmick. Speech-driven facial animation using cascaded GANs for learning of motion and texture. In ECCV, 2020.
[11] Prafulla Dhariwal and Alexander Nichol. Diffusion models beat GANs on image synthesis. NeurIPS, 2021.
[12] Marcelo dos Santos, Rayson Laroca, Rafael O Ribeiro, João Neves, Hugo Proença, and David Menotti. Face super-resolution using stochastic differential equations. arXiv, 2022.
[13] Patrick Esser, Robin Rombach, and Björn Ommer. Taming transformers for high-resolution image synthesis. In CVPR, 2021.
[14] Pablo Garrido, Levi Valgaerts, Hamid Sarmadi, Ingmar Steiner, Kiran Varanasi, Patrick Pérez, and Christian Theobalt. VDub: Modifying face video of actors for plausible visual alignment to a dubbed audio track. In Computer Graphics Forum, 2015.
[15] Charles J Geyer. Practical Markov chain Monte Carlo. Statistical Science, 1992.
[16] Kuangxiao Gu, Yuqian Zhou, and Thomas Huang. FLNet: Landmark driven fetching and learning network for faithful talking facial animation synthesis. In AAAI, 2020.
[17] Yudong Guo, Keyu Chen, Sen Liang, Yongjin Liu, Hujun Bao, and Juyong Zhang. AD-NeRF: Audio driven neural radiance fields for talking head synthesis. In ICCV, 2021.
[18] Awni Hannun, Carl Case, Jared Casper, Bryan Catanzaro, Greg Diamos, Erich Elsen, Ryan Prenger, Sanjeev Satheesh, Shubho Sengupta, Adam Coates, et al. Deep Speech: Scaling up end-to-end speech recognition. arXiv, 2014.
[19] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. NeurIPS, 2020.
[20] Xinya Ji, Hang Zhou, Kaisiyuan Wang, Wayne Wu, Chen Change Loy, Xun Cao, and Feng Xu. Audio-driven emotional video portraits. In CVPR, 2021.
[21] Bahjat Kawar, Shiran Zada, Oran Lang, Omer Tov, Huiwen Chang, Tali Dekel, Inbar Mosseri, and Michal Irani. Imagic: Text-based real image editing with diffusion models. arXiv, 2022.
[22] Prajwal KR, Rudrabha Mukhopadhyay, Jerin Philip, Abhishek Jha, Vinay Namboodiri, and CV Jawahar. Towards automatic face-to-face translation. In ACMMM, 2019.
[23] Wing-Fung Ku, Wan-Chi Siu, Xi Cheng, and H Anthony Chan. Intelligent painter: Picture composition with resampling diffusion model. arXiv, 2022.
[24] Xian Liu, Yinghao Xu, Qianyi Wu, Hang Zhou, Wayne Wu, and Bolei Zhou. Semantic-aware implicit neural audio-driven video portrait generation. arXiv, 2022.
[25] Andreas Lugmayr, Martin Danelljan, Andres Romero, Fisher Yu, Radu Timofte, and Luc Van Gool. RePaint: Inpainting using denoising diffusion probabilistic models. In CVPR, 2022.
[26] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. NeRF: Representing scenes as neural radiance fields for view synthesis. In ECCV, 2020.
[27] Kushagra Pandey, Avideep Mukherjee, Piyush Rai, and Abhishek Kumar. DiffuseVAE: Efficient, controllable and high-fidelity generation from low-dimensional latents. arXiv, 2022.
[28] KR Prajwal, Rudrabha Mukhopadhyay, Vinay P Namboodiri, and CV Jawahar. A lip sync expert is all you need for speech to lip generation in the wild. In ACMMM, 2020.
[29] Aditya Ramesh, Mikhail Pavlov, Gabriel Goh, Scott Gray, Chelsea Voss, Alec Radford, Mark Chen, and Ilya Sutskever. Zero-shot text-to-image generation. In ICML, 2021.
[30] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In CVPR, 2022.
[31] Robin Rombach, Andreas Blattmann, and Björn Ommer. Text-guided synthesis of artistic images with retrieval-augmented diffusion models. arXiv, 2022.
[32] Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-Net: Convolutional networks for biomedical image segmentation. In MICCAI, 2015.
[33] Nataniel Ruiz, Yuanzhen Li, Varun Jampani, Yael Pritch, Michael Rubinstein, and Kfir Aberman. DreamBooth: Fine tuning text-to-image diffusion models for subject-driven generation. arXiv, 2022.
[34] Saeed Saadatnejad, Ali Rasekh, Mohammadreza Mofayezi, Yasamin Medghalchi, Sara Rajabzadeh, Taylor Mordan, and Alexandre Alahi. A generic diffusion-based approach for 3d human pose prediction in the wild. arXiv, 2022.
[35] Ruizhi Shao, Zerong Zheng, Hongwen Zhang, Jingxiang Sun, and Yebin Liu. DiffuStereo: High quality human reconstruction via diffusion-based stereo using sparse cameras. arXiv, 2022.
[36] Shuai Shen, Wanhua Li, Zheng Zhu, Yueqi Duan, Jie Zhou, and Jiwen Lu. Learning dynamic facial radiance fields for few-shot talking head synthesis. In ECCV, 2022.
[37] Jascha Sohl-Dickstein, Eric Weiss, Niru Maheswaranathan, and Surya Ganguli. Deep unsupervised learning using nonequilibrium thermodynamics. In ICML, 2015.
[38] Jiaming Song, Chenlin Meng, and Stefano Ermon. Denoising diffusion implicit models. arXiv, 2020.
[39] Linsen Song, Wayne Wu, Chen Qian, Ran He, and Chen Change Loy. Everybody's talkin': Let me talk as you want. arXiv, 2020.
[40] Supasorn Suwajanakorn, Steven M Seitz, and Ira Kemelmacher-Shlizerman. Synthesizing Obama: learning lip sync from audio. TOG, 2017.
[41] Justus Thies, Mohamed Elgharib, Ayush Tewari, Christian Theobalt, and Matthias Nießner. Neural voice puppetry: Audio-driven facial reenactment. In ECCV, 2020.
[42] Justus Thies, Michael Zollhöfer, Marc Stamminger, Christian Theobalt, and Matthias Nießner. Face2Face: Real-time face capture and reenactment of RGB videos. In CVPR, 2016.
[43] Dominik JE Waibel, Ernst Röell, Bastian Rieck, Raja Giryes, and Carsten Marr. A diffusion model predicts 3d shapes from 2d microscopy images. arXiv, 2022.
[44] Ting-Chun Wang, Arun Mallya, and Ming-Yu Liu. One-shot free-view neural talking-head synthesis for video conferencing. In CVPR, 2021.
[45] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. Image quality assessment: from error visibility to structural similarity. TIP, 2004.
[46] Shunyu Yao, RuiZhe Zhong, Yichao Yan, Guangtao Zhai, and Xiaokang Yang. DFA-NeRF: Personalized talking head generation via disentangled face attributes neural rendering. arXiv, 2022.
[47] Egor Zakharov, Aliaksandra Shysheya, Egor Burkov, and Victor Lempitsky. Few-shot adversarial learning of realistic neural talking head models. In ICCV, 2019.
[48] Mingyuan Zhang, Zhongang Cai, Liang Pan, Fangzhou Hong, Xinying Guo, Lei Yang, and Ziwei Liu. MotionDiffuse: Text-driven human motion generation with diffusion model. arXiv, 2022.
[49] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In CVPR, 2018.
[50] Xi Zhang, Xiaolin Wu, Xinliang Zhai, Xianye Ben, and Chengjie Tu. DAVD-Net: Deep audio-aided video decompression of talking heads. In CVPR, 2020.
[51] Zhimeng Zhang, Lincheng Li, Yu Ding, and Changjie Fan. Flow-guided one-shot talking face generation with a high-resolution audio-visual dataset. In CVPR, 2021.
[52] Yang Zhou, Xintong Han, Eli Shechtman, Jose Echevarria, Evangelos Kalogerakis, and Dingzeyu Li. MakeItTalk: speaker-aware talking-head animation. TOG, 2020.
[53] Michael Zollhöfer, Justus Thies, Pablo Garrido, Derek Bradley, Thabo Beeler, Patrick Pérez, Marc Stamminger, Matthias Nießner, and Christian Theobalt. State of the art on monocular 3d face reconstruction, tracking, and applications. In Computer Graphics Forum, 2018.
CdE2T4oBgHgl3EQfSAcB/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
DNE0T4oBgHgl3EQfggEA/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c17fa2bcbd4454ccb848c1c68489d5715039e0ba46532f3e1a610c469f6bc3f6
3
+ size 5570605
FdA0T4oBgHgl3EQfBP_A/content/2301.01974v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6bd7a603f14cd143947949ebe63e3c0929bf9523ff746422d0a38fa2b5aa28ab
3
+ size 186096
FdA0T4oBgHgl3EQfBP_A/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9e1a91d36a599e5cc1e8416e6351f0c3a6cec4597ea888b57b181edf3427645f
3
+ size 2031661
FdA0T4oBgHgl3EQfBP_A/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9e4141e186644bf5907d96ea1d62e0a26b3ad066026ee564024df5c9e7b5c8df
3
+ size 75613
HNFJT4oBgHgl3EQfuC2G/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b5a13ae3972787bcffb53e9c2e3633d5aa1597fee59362c2b99c98bc970cbd49
3
+ size 57167
I9AyT4oBgHgl3EQfsPkA/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f89a2b50b1d690c794a448b4edccbc4722538e5a3729e8f7acb8346ea1b6af91
3
+ size 103256